[llvm] r319665 - [CodeGen] Unify MBB reference format in both MIR and debug output
Francis Visoiu Mistrih via llvm-commits
llvm-commits@lists.llvm.org
Mon Dec 4 09:18:56 PST 2017
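
The change below is mechanical: the printer now emits machine basic block references as "%bb.N" rather than "BB#N", so every FileCheck expectation of the form "# BB#0:" in these tests is updated to "# %bb.0:". A representative before/after pair, taken directly from the hunks that follow:

  ; Old assembly-comment format matched by the tests:
  ; CHECK: # BB#0: # %entry
  ; New unified MIR/debug-output format:
  ; CHECK: # %bb.0: # %entry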
Modified: llvm/trunk/test/CodeGen/X86/tbm_patterns.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tbm_patterns.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tbm_patterns.ll (original)
+++ llvm/trunk/test/CodeGen/X86/tbm_patterns.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@
define i32 @test_x86_tbm_bextri_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, %edi, %eax # imm = 0xC04
; CHECK-NEXT: retq
%t0 = lshr i32 %a, 4
@@ -16,7 +16,7 @@ define i32 @test_x86_tbm_bextri_u32(i32
; Make sure we still use AH subreg trick for extracting bits 15:8
define i32 @test_x86_tbm_bextri_u32_subreg(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u32_subreg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: movzbl %ah, %eax # NOREX
; CHECK-NEXT: retq
@@ -27,7 +27,7 @@ define i32 @test_x86_tbm_bextri_u32_subr
define i32 @test_x86_tbm_bextri_u32_m(i32* nocapture %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u32_m:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, (%rdi), %eax # imm = 0xC04
; CHECK-NEXT: retq
%t0 = load i32, i32* %a
@@ -38,7 +38,7 @@ define i32 @test_x86_tbm_bextri_u32_m(i3
define i32 @test_x86_tbm_bextri_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, %edi, %eax # imm = 0xC04
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -51,7 +51,7 @@ define i32 @test_x86_tbm_bextri_u32_z(i3
define i32 @test_x86_tbm_bextri_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shrl $4, %edi
; CHECK-NEXT: testl $4095, %edi # imm = 0xFFF
; CHECK-NEXT: cmovnel %edx, %esi
@@ -66,7 +66,7 @@ define i32 @test_x86_tbm_bextri_u32_z2(i
define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, %edi, %eax # imm = 0xC04
; CHECK-NEXT: retq
%t0 = lshr i64 %a, 4
@@ -77,7 +77,7 @@ define i64 @test_x86_tbm_bextri_u64(i64
; Make sure we still use AH subreg trick for extracting bits 15:8
define i64 @test_x86_tbm_bextri_u64_subreg(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u64_subreg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: movzbl %ah, %eax # NOREX
; CHECK-NEXT: retq
@@ -88,7 +88,7 @@ define i64 @test_x86_tbm_bextri_u64_subr
define i64 @test_x86_tbm_bextri_u64_m(i64* nocapture %a) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u64_m:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, (%rdi), %eax # imm = 0xC04
; CHECK-NEXT: retq
%t0 = load i64, i64* %a
@@ -99,7 +99,7 @@ define i64 @test_x86_tbm_bextri_u64_m(i6
define i64 @test_x86_tbm_bextri_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: bextr $3076, %edi, %eax # imm = 0xC04
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -112,7 +112,7 @@ define i64 @test_x86_tbm_bextri_u64_z(i6
define i64 @test_x86_tbm_bextri_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_bextri_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shrl $4, %edi
; CHECK-NEXT: testl $4095, %edi # imm = 0xFFF
; CHECK-NEXT: cmovneq %rdx, %rsi
@@ -127,7 +127,7 @@ define i64 @test_x86_tbm_bextri_u64_z2(i
define i32 @test_x86_tbm_blcfill_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcfill %edi, %eax
; CHECK-NEXT: retq
%t0 = add i32 %a, 1
@@ -137,7 +137,7 @@ define i32 @test_x86_tbm_blcfill_u32(i32
define i32 @test_x86_tbm_blcfill_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcfill %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -150,7 +150,7 @@ define i32 @test_x86_tbm_blcfill_u32_z(i
define i32 @test_x86_tbm_blcfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: testl %edi, %eax
@@ -166,7 +166,7 @@ define i32 @test_x86_tbm_blcfill_u32_z2(
define i64 @test_x86_tbm_blcfill_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcfill %rdi, %rax
; CHECK-NEXT: retq
%t0 = add i64 %a, 1
@@ -176,7 +176,7 @@ define i64 @test_x86_tbm_blcfill_u64(i64
define i64 @test_x86_tbm_blcfill_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcfill %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -189,7 +189,7 @@ define i64 @test_x86_tbm_blcfill_u64_z(i
define i64 @test_x86_tbm_blcfill_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq 1(%rdi), %rax
; CHECK-NEXT: testq %rdi, %rax
; CHECK-NEXT: cmovneq %rdx, %rsi
@@ -204,7 +204,7 @@ define i64 @test_x86_tbm_blcfill_u64_z2(
define i32 @test_x86_tbm_blci_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %edi, %eax
; CHECK-NEXT: retq
%t0 = add i32 1, %a
@@ -215,7 +215,7 @@ define i32 @test_x86_tbm_blci_u32(i32 %a
define i32 @test_x86_tbm_blci_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -229,7 +229,7 @@ define i32 @test_x86_tbm_blci_u32_z(i32
define i32 @test_x86_tbm_blci_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: notl %eax
@@ -247,7 +247,7 @@ define i32 @test_x86_tbm_blci_u32_z2(i32
define i64 @test_x86_tbm_blci_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %rdi, %rax
; CHECK-NEXT: retq
%t0 = add i64 1, %a
@@ -258,7 +258,7 @@ define i64 @test_x86_tbm_blci_u64(i64 %a
define i64 @test_x86_tbm_blci_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -272,7 +272,7 @@ define i64 @test_x86_tbm_blci_u64_z(i64
define i64 @test_x86_tbm_blci_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq 1(%rdi), %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: orq %rdi, %rax
@@ -289,7 +289,7 @@ define i64 @test_x86_tbm_blci_u64_z2(i64
define i32 @test_x86_tbm_blci_u32_b(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32_b:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %edi, %eax
; CHECK-NEXT: retq
%t0 = sub i32 -2, %a
@@ -299,7 +299,7 @@ define i32 @test_x86_tbm_blci_u32_b(i32
define i64 @test_x86_tbm_blci_u64_b(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u64_b:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blci %rdi, %rax
; CHECK-NEXT: retq
%t0 = sub i64 -2, %a
@@ -309,7 +309,7 @@ define i64 @test_x86_tbm_blci_u64_b(i64
define i32 @test_x86_tbm_blcic_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcic %edi, %eax
; CHECK-NEXT: retq
%t0 = xor i32 %a, -1
@@ -320,7 +320,7 @@ define i32 @test_x86_tbm_blcic_u32(i32 %
define i32 @test_x86_tbm_blcic_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcic %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -334,7 +334,7 @@ define i32 @test_x86_tbm_blcic_u32_z(i32
define i32 @test_x86_tbm_blcic_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: incl %edi
@@ -352,7 +352,7 @@ define i32 @test_x86_tbm_blcic_u32_z2(i3
define i64 @test_x86_tbm_blcic_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcic %rdi, %rax
; CHECK-NEXT: retq
%t0 = xor i64 %a, -1
@@ -363,7 +363,7 @@ define i64 @test_x86_tbm_blcic_u64(i64 %
define i64 @test_x86_tbm_blcic_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcic %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -377,7 +377,7 @@ define i64 @test_x86_tbm_blcic_u64_z(i64
define i64 @test_x86_tbm_blcic_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcic_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: incq %rdi
@@ -395,7 +395,7 @@ define i64 @test_x86_tbm_blcic_u64_z2(i6
define i32 @test_x86_tbm_blcmsk_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcmsk %edi, %eax
; CHECK-NEXT: retq
%t0 = add i32 %a, 1
@@ -405,7 +405,7 @@ define i32 @test_x86_tbm_blcmsk_u32(i32
define i32 @test_x86_tbm_blcmsk_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcmsk %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -418,7 +418,7 @@ define i32 @test_x86_tbm_blcmsk_u32_z(i3
define i32 @test_x86_tbm_blcmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: xorl %edi, %eax
@@ -434,7 +434,7 @@ define i32 @test_x86_tbm_blcmsk_u32_z2(i
define i64 @test_x86_tbm_blcmsk_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcmsk %rdi, %rax
; CHECK-NEXT: retq
%t0 = add i64 %a, 1
@@ -444,7 +444,7 @@ define i64 @test_x86_tbm_blcmsk_u64(i64
define i64 @test_x86_tbm_blcmsk_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcmsk %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -457,7 +457,7 @@ define i64 @test_x86_tbm_blcmsk_u64_z(i6
define i64 @test_x86_tbm_blcmsk_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq 1(%rdi), %rax
; CHECK-NEXT: xorq %rdi, %rax
; CHECK-NEXT: cmovneq %rdx, %rsi
@@ -472,7 +472,7 @@ define i64 @test_x86_tbm_blcmsk_u64_z2(i
define i32 @test_x86_tbm_blcs_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcs %edi, %eax
; CHECK-NEXT: retq
%t0 = add i32 %a, 1
@@ -482,7 +482,7 @@ define i32 @test_x86_tbm_blcs_u32(i32 %a
define i32 @test_x86_tbm_blcs_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcs %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -495,7 +495,7 @@ define i32 @test_x86_tbm_blcs_u32_z(i32
define i32 @test_x86_tbm_blcs_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: orl %edi, %eax
@@ -511,7 +511,7 @@ define i32 @test_x86_tbm_blcs_u32_z2(i32
define i64 @test_x86_tbm_blcs_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcs %rdi, %rax
; CHECK-NEXT: retq
%t0 = add i64 %a, 1
@@ -521,7 +521,7 @@ define i64 @test_x86_tbm_blcs_u64(i64 %a
define i64 @test_x86_tbm_blcs_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blcs %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -534,7 +534,7 @@ define i64 @test_x86_tbm_blcs_u64_z(i64
define i64 @test_x86_tbm_blcs_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq 1(%rdi), %rax
; CHECK-NEXT: orq %rdi, %rax
; CHECK-NEXT: cmovneq %rdx, %rsi
@@ -549,7 +549,7 @@ define i64 @test_x86_tbm_blcs_u64_z2(i64
define i32 @test_x86_tbm_blsfill_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsfill %edi, %eax
; CHECK-NEXT: retq
%t0 = add i32 %a, -1
@@ -559,7 +559,7 @@ define i32 @test_x86_tbm_blsfill_u32(i32
define i32 @test_x86_tbm_blsfill_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsfill %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -572,7 +572,7 @@ define i32 @test_x86_tbm_blsfill_u32_z(i
define i32 @test_x86_tbm_blsfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: orl %edi, %eax
@@ -588,7 +588,7 @@ define i32 @test_x86_tbm_blsfill_u32_z2(
define i64 @test_x86_tbm_blsfill_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsfill %rdi, %rax
; CHECK-NEXT: retq
%t0 = add i64 %a, -1
@@ -598,7 +598,7 @@ define i64 @test_x86_tbm_blsfill_u64(i64
define i64 @test_x86_tbm_blsfill_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsfill %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -611,7 +611,7 @@ define i64 @test_x86_tbm_blsfill_u64_z(i
define i64 @test_x86_tbm_blsfill_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: leaq -1(%rdi), %rax
; CHECK-NEXT: orq %rdi, %rax
; CHECK-NEXT: cmovneq %rdx, %rsi
@@ -626,7 +626,7 @@ define i64 @test_x86_tbm_blsfill_u64_z2(
define i32 @test_x86_tbm_blsic_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsic %edi, %eax
; CHECK-NEXT: retq
%t0 = xor i32 %a, -1
@@ -637,7 +637,7 @@ define i32 @test_x86_tbm_blsic_u32(i32 %
define i32 @test_x86_tbm_blsic_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsic %edi, %eax
; CHECK-NEXT: cmovel %esi, %eax
; CHECK-NEXT: retq
@@ -651,7 +651,7 @@ define i32 @test_x86_tbm_blsic_u32_z(i32
define i32 @test_x86_tbm_blsic_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: decl %edi
@@ -669,7 +669,7 @@ define i32 @test_x86_tbm_blsic_u32_z2(i3
define i64 @test_x86_tbm_blsic_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsic %rdi, %rax
; CHECK-NEXT: retq
%t0 = xor i64 %a, -1
@@ -680,7 +680,7 @@ define i64 @test_x86_tbm_blsic_u64(i64 %
define i64 @test_x86_tbm_blsic_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: blsic %rdi, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
; CHECK-NEXT: retq
@@ -694,7 +694,7 @@ define i64 @test_x86_tbm_blsic_u64_z(i64
define i64 @test_x86_tbm_blsic_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsic_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: decq %rdi
@@ -712,7 +712,7 @@ define i64 @test_x86_tbm_blsic_u64_z2(i6
define i32 @test_x86_tbm_t1mskc_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: t1mskc %edi, %eax
; CHECK-NEXT: retq
%t0 = xor i32 %a, -1
@@ -723,7 +723,7 @@ define i32 @test_x86_tbm_t1mskc_u32(i32
define i32 @test_x86_tbm_t1mskc_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: t1mskc %edi, %eax
; CHECK-NEXT: testl %eax, %eax
; CHECK-NEXT: cmovel %esi, %eax
@@ -738,7 +738,7 @@ define i32 @test_x86_tbm_t1mskc_u32_z(i3
define i32 @test_x86_tbm_t1mskc_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: incl %edi
@@ -756,7 +756,7 @@ define i32 @test_x86_tbm_t1mskc_u32_z2(i
define i64 @test_x86_tbm_t1mskc_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: t1mskc %rdi, %rax
; CHECK-NEXT: retq
%t0 = xor i64 %a, -1
@@ -767,7 +767,7 @@ define i64 @test_x86_tbm_t1mskc_u64(i64
define i64 @test_x86_tbm_t1mskc_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: t1mskc %rdi, %rax
; CHECK-NEXT: testq %rax, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
@@ -782,7 +782,7 @@ define i64 @test_x86_tbm_t1mskc_u64_z(i6
define i64 @test_x86_tbm_t1mskc_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_t1mskc_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: incq %rdi
@@ -800,7 +800,7 @@ define i64 @test_x86_tbm_t1mskc_u64_z2(i
define i32 @test_x86_tbm_tzmsk_u32(i32 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzmsk %edi, %eax
; CHECK-NEXT: retq
%t0 = xor i32 %a, -1
@@ -811,7 +811,7 @@ define i32 @test_x86_tbm_tzmsk_u32(i32 %
define i32 @test_x86_tbm_tzmsk_u32_z(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u32_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzmsk %edi, %eax
; CHECK-NEXT: testl %eax, %eax
; CHECK-NEXT: cmovel %esi, %eax
@@ -826,7 +826,7 @@ define i32 @test_x86_tbm_tzmsk_u32_z(i32
define i32 @test_x86_tbm_tzmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u32_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: decl %edi
@@ -844,7 +844,7 @@ define i32 @test_x86_tbm_tzmsk_u32_z2(i3
define i64 @test_x86_tbm_tzmsk_u64(i64 %a) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzmsk %rdi, %rax
; CHECK-NEXT: retq
%t0 = xor i64 %a, -1
@@ -855,7 +855,7 @@ define i64 @test_x86_tbm_tzmsk_u64(i64 %
define i64 @test_x86_tbm_tzmsk_u64_z(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u64_z:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: tzmsk %rdi, %rax
; CHECK-NEXT: testq %rax, %rax
; CHECK-NEXT: cmoveq %rsi, %rax
@@ -870,7 +870,7 @@ define i64 @test_x86_tbm_tzmsk_u64_z(i64
define i64 @test_x86_tbm_tzmsk_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_tzmsk_u64_z2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: decq %rdi
@@ -888,7 +888,7 @@ define i64 @test_x86_tbm_tzmsk_u64_z2(i6
define i64 @test_and_large_constant_mask(i64 %x) {
; CHECK-LABEL: test_and_large_constant_mask:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: bextr $15872, %rdi, %rax # imm = 0x3E00
; CHECK-NEXT: retq
entry:
@@ -898,7 +898,7 @@ entry:
define i64 @test_and_large_constant_mask_load(i64* %x) {
; CHECK-LABEL: test_and_large_constant_mask_load:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: bextr $15872, (%rdi), %rax # imm = 0x3E00
; CHECK-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/tls-pie.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tls-pie.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tls-pie.ll (original)
+++ llvm/trunk/test/CodeGen/X86/tls-pie.ll Mon Dec 4 09:18:51 2017
@@ -8,17 +8,17 @@
define i32 @f1() {
; X86-LABEL: f1:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl %gs:i@NTPOFF, %eax
; X86-NEXT: retl
;
; X32-LABEL: f1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl %fs:i@TPOFF, %eax
; X32-NEXT: retq
;
; X64-LABEL: f1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movl %fs:i@TPOFF, %eax
; X64-NEXT: retq
entry:
@@ -28,19 +28,19 @@ entry:
define i32* @f2() {
; X86-LABEL: f2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl %gs:0, %eax
; X86-NEXT: leal i@NTPOFF(%eax), %eax
; X86-NEXT: retl
;
; X32-LABEL: f2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl %fs:0, %eax
; X32-NEXT: leal i@TPOFF(%rax), %eax
; X32-NEXT: retq
;
; X64-LABEL: f2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %fs:0, %rax
; X64-NEXT: leaq i@TPOFF(%rax), %rax
; X64-NEXT: retq
@@ -50,7 +50,7 @@ entry:
define i32 @f3() {
; X86-LABEL: f3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: calll .L2$pb
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: .L2$pb:
@@ -63,13 +63,13 @@ define i32 @f3() {
; X86-NEXT: retl
;
; X32-LABEL: f3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl i2@{{.*}}(%rip), %eax
; X32-NEXT: movl %fs:(%eax), %eax
; X32-NEXT: retq
;
; X64-LABEL: f3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq i2@{{.*}}(%rip), %rax
; X64-NEXT: movl %fs:(%rax), %eax
; X64-NEXT: retq
@@ -80,7 +80,7 @@ entry:
define i32* @f4() {
; X86-LABEL: f4:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: calll .L3$pb
; X86-NEXT: .cfi_adjust_cfa_offset 4
; X86-NEXT: .L3$pb:
@@ -93,13 +93,13 @@ define i32* @f4() {
; X86-NEXT: retl
;
; X32-LABEL: f4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl %fs:0, %eax
; X32-NEXT: addl i2@{{.*}}(%rip), %eax
; X32-NEXT: retq
;
; X64-LABEL: f4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %fs:0, %rax
; X64-NEXT: addq i2@{{.*}}(%rip), %rax
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/tls-shrink-wrapping.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tls-shrink-wrapping.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tls-shrink-wrapping.ll (original)
+++ llvm/trunk/test/CodeGen/X86/tls-shrink-wrapping.ll Mon Dec 4 09:18:51 2017
@@ -37,7 +37,7 @@ if.end:
; CHECK: g: # @g
; CHECK-NEXT: .cfi_startproc
-; CHECK-NEXT: # BB#0: # %entry
+; CHECK-NEXT: # %bb.0: # %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
Modified: llvm/trunk/test/CodeGen/X86/trunc-ext-ld-st.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/trunc-ext-ld-st.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/trunc-ext-ld-st.ll (original)
+++ llvm/trunk/test/CodeGen/X86/trunc-ext-ld-st.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@
; A single 16-bit load + a single 16-bit store
define void @load_2_i8(<2 x i8>* %A) {
; SSE2-LABEL: load_2_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movzwl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -21,7 +21,7 @@ define void @load_2_i8(<2 x i8>* %A) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_2_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: paddq {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -36,7 +36,7 @@ define void @load_2_i8(<2 x i8>* %A) {
; Read 32-bits
define void @load_2_i16(<2 x i16>* %A) {
; SSE2-LABEL: load_2_i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
@@ -47,7 +47,7 @@ define void @load_2_i16(<2 x i16>* %A)
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_2_i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; SSE41-NEXT: paddq {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -62,7 +62,7 @@ define void @load_2_i16(<2 x i16>* %A)
define void @load_2_i32(<2 x i32>* %A) {
; SSE2-LABEL: load_2_i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE2-NEXT: paddd {{.*}}(%rip), %xmm0
@@ -71,7 +71,7 @@ define void @load_2_i32(<2 x i32>* %A)
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_2_i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -85,7 +85,7 @@ define void @load_2_i32(<2 x i32>* %A)
define void @load_4_i8(<4 x i8>* %A) {
; SSE2-LABEL: load_4_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -97,7 +97,7 @@ define void @load_4_i8(<4 x i8>* %A) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_4_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -111,7 +111,7 @@ define void @load_4_i8(<4 x i8>* %A) {
define void @load_4_i16(<4 x i16>* %A) {
; SSE2-LABEL: load_4_i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: paddw {{.*}}(%rip), %xmm0
@@ -122,7 +122,7 @@ define void @load_4_i16(<4 x i16>* %A)
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_4_i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT: paddw {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -136,7 +136,7 @@ define void @load_4_i16(<4 x i16>* %A)
define void @load_8_i8(<8 x i8>* %A) {
; SSE2-LABEL: load_8_i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: paddb %xmm0, %xmm0
@@ -146,7 +146,7 @@ define void @load_8_i8(<8 x i8>* %A) {
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_8_i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/trunc-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/trunc-store.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/trunc-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/trunc-store.ll Mon Dec 4 09:18:51 2017
@@ -28,14 +28,14 @@
define void @fn1() {
; CHECK-LABEL: fn1:
-; CHECK: # BB#0: # %for.cond
+; CHECK: # %bb.0: # %for.cond
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movb $0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: cmpq $8, %rax
; CHECK-NEXT: jne .LBB0_1
-; CHECK-NEXT: # BB#2: # %middle.block
+; CHECK-NEXT: # %bb.2: # %middle.block
; CHECK-NEXT: retq
for.cond:
br label %vector.body
Modified: llvm/trunk/test/CodeGen/X86/trunc-to-bool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/trunc-to-bool.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/trunc-to-bool.ll (original)
+++ llvm/trunk/test/CodeGen/X86/trunc-to-bool.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define zeroext i1 @test1(i32 %X) nounwind {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al
; CHECK-NEXT: andb $1, %al
; CHECK-NEXT: retl
@@ -16,12 +16,12 @@ define zeroext i1 @test1(i32 %X) nounwi
define i1 @test2(i32 %val, i32 %mask) nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: btl %ecx, %eax
; CHECK-NEXT: jae .LBB1_2
-; CHECK-NEXT: # BB#1: # %ret_true
+; CHECK-NEXT: # %bb.1: # %ret_true
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB1_2: # %ret_false
@@ -40,11 +40,11 @@ ret_false:
define i32 @test3(i8* %ptr) nounwind {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: testb $1, (%eax)
; CHECK-NEXT: je .LBB2_2
-; CHECK-NEXT: # BB#1: # %cond_true
+; CHECK-NEXT: # %bb.1: # %cond_true
; CHECK-NEXT: movl $21, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB2_2: # %cond_false
@@ -61,10 +61,10 @@ cond_false:
define i32 @test4(i8* %ptr) nounwind {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: je .LBB3_2
-; CHECK-NEXT: # BB#1: # %cond_true
+; CHECK-NEXT: # %bb.1: # %cond_true
; CHECK-NEXT: movl $21, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB3_2: # %cond_false
@@ -80,7 +80,7 @@ cond_false:
define i32 @test5(double %d) nounwind {
; CHECK-LABEL: test5:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: fnstcw (%esp)
@@ -92,7 +92,7 @@ define i32 @test5(double %d) nounwind {
; CHECK-NEXT: fldcw (%esp)
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: je .LBB4_2
-; CHECK-NEXT: # BB#1: # %cond_true
+; CHECK-NEXT: # %bb.1: # %cond_true
; CHECK-NEXT: movl $21, %eax
; CHECK-NEXT: popl %ecx
; CHECK-NEXT: retl
Modified: llvm/trunk/test/CodeGen/X86/uint64-to-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/uint64-to-float.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/uint64-to-float.ll (original)
+++ llvm/trunk/test/CodeGen/X86/uint64-to-float.ll Mon Dec 4 09:18:51 2017
@@ -8,7 +8,7 @@
define float @test(i64 %a) nounwind {
; X86-LABEL: test:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
@@ -29,10 +29,10 @@ define float @test(i64 %a) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: js .LBB0_1
-; X64-NEXT: # BB#2: # %entry
+; X64-NEXT: # %bb.2: # %entry
; X64-NEXT: cvtsi2ssq %rdi, %xmm0
; X64-NEXT: retq
; X64-NEXT: .LBB0_1:
Modified: llvm/trunk/test/CodeGen/X86/uint_to_fp-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/uint_to_fp-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/uint_to_fp-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/uint_to_fp-2.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
; rdar://6504833
define float @test1(i32 %x) nounwind readnone {
; CHECK-LABEL: test1:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -24,7 +24,7 @@ entry:
; PR10802
define float @test2(<4 x i32> %x) nounwind readnone ssp {
; CHECK-LABEL: test2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
Modified: llvm/trunk/test/CodeGen/X86/uint_to_fp-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/uint_to_fp-3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/uint_to_fp-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/uint_to_fp-3.ll Mon Dec 4 09:18:51 2017
@@ -8,25 +8,25 @@
define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
; X32-SSE-LABEL: mask_ucvt_4i32_4f32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: mask_ucvt_4i32_4f32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_ucvt_4i32_4f32:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: mask_ucvt_4i32_4f32:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX-NEXT: retq
@@ -37,7 +37,7 @@ define <4 x float> @mask_ucvt_4i32_4f32(
define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
; X32-SSE-LABEL: mask_ucvt_4i32_4f64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm2
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -46,13 +46,13 @@ define <4 x double> @mask_ucvt_4i32_4f64
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: mask_ucvt_4i32_4f64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_ucvt_4i32_4f64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pand {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: cvtdq2pd %xmm0, %xmm2
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -61,7 +61,7 @@ define <4 x double> @mask_ucvt_4i32_4f64
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: mask_ucvt_4i32_4f64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; X64-AVX-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/uint_to_fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/uint_to_fp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/uint_to_fp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/uint_to_fp.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@
define void @test(i32 %x, float* %y) nounwind {
; X32-LABEL: test:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: shrl $23, %ecx
@@ -14,7 +14,7 @@ define void @test(i32 %x, float* %y) nou
; X32-NEXT: retl
;
; X64-LABEL: test:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: shrl $23, %edi
; X64-NEXT: cvtsi2ssl %edi, %xmm0
; X64-NEXT: movss %xmm0, (%rsi)
Modified: llvm/trunk/test/CodeGen/X86/umul-with-overflow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/umul-with-overflow.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/umul-with-overflow.ll (original)
+++ llvm/trunk/test/CodeGen/X86/umul-with-overflow.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@ declare {i32, i1} @llvm.umul.with.overfl
define zeroext i1 @a(i32 %x) nounwind {
; X86-LABEL: a:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $3, %ecx
; X86-NEXT: mull %ecx
@@ -14,7 +14,7 @@ define zeroext i1 @a(i32 %x) nounwind {
; X86-NEXT: retl
;
; X64-LABEL: a:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $3, %ecx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: mull %ecx
@@ -27,14 +27,14 @@ define zeroext i1 @a(i32 %x) nounwind {
define i32 @test2(i32 %a, i32 %b) nounwind readnone {
; X86-LABEL: test2:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: addl %esi, %edi
; X64-NEXT: leal (%rdi,%rdi), %eax
@@ -48,7 +48,7 @@ entry:
define i32 @test3(i32 %a, i32 %b) nounwind readnone {
; X86-LABEL: test3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $4, %ecx
@@ -56,7 +56,7 @@ define i32 @test3(i32 %a, i32 %b) nounwi
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rsi), %eax
Modified: llvm/trunk/test/CodeGen/X86/unaligned-32-byte-memops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/unaligned-32-byte-memops.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/unaligned-32-byte-memops.ll (original)
+++ llvm/trunk/test/CodeGen/X86/unaligned-32-byte-memops.ll Mon Dec 4 09:18:51 2017
@@ -7,18 +7,18 @@
define <8 x float> @load32bytes(<8 x float>* %Ap) {
; AVXSLOW-LABEL: load32bytes:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovaps (%rdi), %xmm0
; AVXSLOW-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: load32bytes:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups (%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: load32bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups (%rdi), %ymm0
; AVX2-NEXT: retq
%A = load <8 x float>, <8 x float>* %Ap, align 16
@@ -29,20 +29,20 @@ define <8 x float> @load32bytes(<8 x flo
define void @store32bytes(<8 x float> %A, <8 x float>* %P) {
; AVXSLOW-LABEL: store32bytes:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, 16(%rdi)
; AVXSLOW-NEXT: vmovaps %xmm0, (%rdi)
; AVXSLOW-NEXT: vzeroupper
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: store32bytes:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups %ymm0, (%rdi)
; AVXFAST-NEXT: vzeroupper
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: store32bytes:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -54,18 +54,18 @@ define void @store32bytes(<8 x float> %A
define <8 x float> @combine_16_byte_loads_no_intrinsic(<4 x float>* %ptr) {
; AVXSLOW-LABEL: combine_16_byte_loads_no_intrinsic:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovups 48(%rdi), %xmm0
; AVXSLOW-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_no_intrinsic:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups 48(%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_no_intrinsic:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups 48(%rdi), %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
@@ -80,17 +80,17 @@ define <8 x float> @combine_16_byte_load
define <8 x float> @combine_16_byte_loads_aligned(<4 x float>* %ptr) {
; AVXSLOW-LABEL: combine_16_byte_loads_aligned:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovaps 48(%rdi), %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_aligned:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovaps 48(%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_aligned:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps 48(%rdi), %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
@@ -105,18 +105,18 @@ define <8 x float> @combine_16_byte_load
define <8 x float> @combine_16_byte_loads_no_intrinsic_swap(<4 x float>* %ptr) {
; AVXSLOW-LABEL: combine_16_byte_loads_no_intrinsic_swap:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovups 64(%rdi), %xmm0
; AVXSLOW-NEXT: vinsertf128 $1, 80(%rdi), %ymm0, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_no_intrinsic_swap:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vmovups 64(%rdi), %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_no_intrinsic_swap:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovups 64(%rdi), %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4
@@ -133,7 +133,7 @@ define <8 x float> @combine_16_byte_load
define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i64:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddq 96(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddq 80(%rdi), %xmm0, %xmm0
@@ -141,7 +141,7 @@ define <4 x i64> @combine_16_byte_loads_
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i64:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddq 96(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddq 80(%rdi), %xmm0, %xmm0
@@ -149,7 +149,7 @@ define <4 x i64> @combine_16_byte_loads_
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq 80(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 5
@@ -163,7 +163,7 @@ define <4 x i64> @combine_16_byte_loads_
define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i32:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddd 112(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddd 96(%rdi), %xmm0, %xmm0
@@ -171,7 +171,7 @@ define <8 x i32> @combine_16_byte_loads_
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i32:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddd 112(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddd 96(%rdi), %xmm0, %xmm0
@@ -179,7 +179,7 @@ define <8 x i32> @combine_16_byte_loads_
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd 96(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 6
@@ -193,7 +193,7 @@ define <8 x i32> @combine_16_byte_loads_
define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i16:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddw 128(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddw 112(%rdi), %xmm0, %xmm0
@@ -201,7 +201,7 @@ define <16 x i16> @combine_16_byte_loads
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i16:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddw 128(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddw 112(%rdi), %xmm0, %xmm0
@@ -209,7 +209,7 @@ define <16 x i16> @combine_16_byte_loads
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddw 112(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <8 x i16>, <8 x i16>* %ptr, i64 7
@@ -223,7 +223,7 @@ define <16 x i16> @combine_16_byte_loads
define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_i8:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXSLOW-NEXT: vpaddb 144(%rdi), %xmm1, %xmm1
; AVXSLOW-NEXT: vpaddb 128(%rdi), %xmm0, %xmm0
@@ -231,7 +231,7 @@ define <32 x i8> @combine_16_byte_loads_
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_i8:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVXFAST-NEXT: vpaddb 144(%rdi), %xmm1, %xmm1
; AVXFAST-NEXT: vpaddb 128(%rdi), %xmm0, %xmm0
@@ -239,7 +239,7 @@ define <32 x i8> @combine_16_byte_loads_
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddb 128(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <16 x i8>, <16 x i8>* %ptr, i64 8
@@ -253,19 +253,19 @@ define <32 x i8> @combine_16_byte_loads_
define <4 x double> @combine_16_byte_loads_double(<2 x double>* %ptr, <4 x double> %x) {
; AVXSLOW-LABEL: combine_16_byte_loads_double:
-; AVXSLOW: # BB#0:
+; AVXSLOW: # %bb.0:
; AVXSLOW-NEXT: vmovups 144(%rdi), %xmm1
; AVXSLOW-NEXT: vinsertf128 $1, 160(%rdi), %ymm1, %ymm1
; AVXSLOW-NEXT: vaddpd %ymm0, %ymm1, %ymm0
; AVXSLOW-NEXT: retq
;
; AVXFAST-LABEL: combine_16_byte_loads_double:
-; AVXFAST: # BB#0:
+; AVXFAST: # %bb.0:
; AVXFAST-NEXT: vaddpd 144(%rdi), %ymm0, %ymm0
; AVXFAST-NEXT: retq
;
; AVX2-LABEL: combine_16_byte_loads_double:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vaddpd 144(%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
%ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 9
Modified: llvm/trunk/test/CodeGen/X86/urem-i8-constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/urem-i8-constant.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/urem-i8-constant.ll (original)
+++ llvm/trunk/test/CodeGen/X86/urem-i8-constant.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@
define i8 @foo(i8 %tmp325) {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: imull $111, %ecx, %eax
; CHECK-NEXT: shrl $12, %eax
Modified: llvm/trunk/test/CodeGen/X86/urem-power-of-two.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/urem-power-of-two.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/urem-power-of-two.ll (original)
+++ llvm/trunk/test/CodeGen/X86/urem-power-of-two.ll Mon Dec 4 09:18:51 2017
@@ -6,14 +6,14 @@
define i64 @const_pow_2(i64 %x) {
; X86-LABEL: const_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: andl $31, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: retl
;
; X64-LABEL: const_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $31, %edi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
@@ -25,7 +25,7 @@ define i64 @const_pow_2(i64 %x) {
define i25 @shift_left_pow_2(i25 %x, i25 %y) {
; X86-LABEL: shift_left_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl $1, %eax
; X86-NEXT: shll %cl, %eax
@@ -34,7 +34,7 @@ define i25 @shift_left_pow_2(i25 %x, i25
; X86-NEXT: retl
;
; X64-LABEL: shift_left_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $1, %eax
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: shll %cl, %eax
@@ -50,7 +50,7 @@ define i25 @shift_left_pow_2(i25 %x, i25
define i16 @shift_right_pow_2(i16 %x, i16 %y) {
; X86-LABEL: shift_right_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: movl $32768, %eax # imm = 0x8000
; X86-NEXT: shrl %cl, %eax
@@ -60,7 +60,7 @@ define i16 @shift_right_pow_2(i16 %x, i1
; X86-NEXT: retl
;
; X64-LABEL: shift_right_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $32768, %eax # imm = 0x8000
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: shrl %cl, %eax
@@ -77,7 +77,7 @@ define i16 @shift_right_pow_2(i16 %x, i1
define i8 @and_pow_2(i8 %x, i8 %y) {
; X86-LABEL: and_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: andb $4, %cl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
@@ -88,7 +88,7 @@ define i8 @and_pow_2(i8 %x, i8 %y) {
; X86-NEXT: retl
;
; X64-LABEL: and_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andb $4, %sil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
@@ -105,12 +105,12 @@ define i8 @and_pow_2(i8 %x, i8 %y) {
define <4 x i32> @vec_const_uniform_pow_2(<4 x i32> %x) {
; X86-LABEL: vec_const_uniform_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_uniform_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
%urem = urem <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
@@ -119,12 +119,12 @@ define <4 x i32> @vec_const_uniform_pow_
define <4 x i32> @vec_const_nonuniform_pow_2(<4 x i32> %x) {
; X86-LABEL: vec_const_nonuniform_pow_2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_nonuniform_pow_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
%urem = urem <4 x i32> %x, <i32 2, i32 4, i32 8, i32 16>
Modified: llvm/trunk/test/CodeGen/X86/use-add-flags.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/use-add-flags.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/use-add-flags.ll (original)
+++ llvm/trunk/test/CodeGen/X86/use-add-flags.ll Mon Dec 4 09:18:51 2017
@@ -9,14 +9,14 @@
define i32 @test1(i32* %x, i32 %y, i32 %a, i32 %b) nounwind {
; LNX-LABEL: test1:
-; LNX: # BB#0:
+; LNX: # %bb.0:
; LNX-NEXT: addl (%rdi), %esi
; LNX-NEXT: cmovnsl %ecx, %edx
; LNX-NEXT: movl %edx, %eax
; LNX-NEXT: retq
;
; WIN-LABEL: test1:
-; WIN: # BB#0:
+; WIN: # %bb.0:
; WIN-NEXT: addl (%rcx), %edx
; WIN-NEXT: cmovnsl %r9d, %r8d
; WIN-NEXT: movl %r8d, %eax
@@ -35,10 +35,10 @@ declare void @foo(i32)
define void @test2(i32 %x) nounwind {
; LNX-LABEL: test2:
-; LNX: # BB#0:
+; LNX: # %bb.0:
; LNX-NEXT: testb $16, %dil
; LNX-NEXT: jne .LBB1_2
-; LNX-NEXT: # BB#1: # %true
+; LNX-NEXT: # %bb.1: # %true
; LNX-NEXT: pushq %rax
; LNX-NEXT: callq foo
; LNX-NEXT: popq %rax
@@ -46,11 +46,11 @@ define void @test2(i32 %x) nounwind {
; LNX-NEXT: retq
;
; WIN-LABEL: test2:
-; WIN: # BB#0:
+; WIN: # %bb.0:
; WIN-NEXT: subq $40, %rsp
; WIN-NEXT: testb $16, %cl
; WIN-NEXT: jne .LBB1_2
-; WIN-NEXT: # BB#1: # %true
+; WIN-NEXT: # %bb.1: # %true
; WIN-NEXT: callq foo
; WIN-NEXT: .LBB1_2: # %false
; WIN-NEXT: addq $40, %rsp
@@ -69,10 +69,10 @@ false:
define void @test3(i32 %x) nounwind {
; LNX-LABEL: test3:
-; LNX: # BB#0:
+; LNX: # %bb.0:
; LNX-NEXT: andl $16, %edi
; LNX-NEXT: jne .LBB2_2
-; LNX-NEXT: # BB#1: # %true
+; LNX-NEXT: # %bb.1: # %true
; LNX-NEXT: pushq %rax
; LNX-NEXT: callq foo
; LNX-NEXT: popq %rax
@@ -80,11 +80,11 @@ define void @test3(i32 %x) nounwind {
; LNX-NEXT: retq
;
; WIN-LABEL: test3:
-; WIN: # BB#0:
+; WIN: # %bb.0:
; WIN-NEXT: subq $40, %rsp
; WIN-NEXT: andl $16, %ecx
; WIN-NEXT: jne .LBB2_2
-; WIN-NEXT: # BB#1: # %true
+; WIN-NEXT: # %bb.1: # %true
; WIN-NEXT: callq foo
; WIN-NEXT: .LBB2_2: # %false
; WIN-NEXT: addq $40, %rsp
Modified: llvm/trunk/test/CodeGen/X86/v2f32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/v2f32.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/v2f32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/v2f32.ll Mon Dec 4 09:18:51 2017
@@ -5,14 +5,14 @@
; PR7518
define void @test1(<2 x float> %Q, float *%P2) nounwind {
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X64-NEXT: addss %xmm0, %xmm1
; X64-NEXT: movss %xmm1, (%rdi)
; X64-NEXT: retq
;
; X32-LABEL: test1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X32-NEXT: addss %xmm0, %xmm1
@@ -27,12 +27,12 @@ define void @test1(<2 x float> %Q, float
define <2 x float> @test2(<2 x float> %Q, <2 x float> %R, <2 x float> *%P) nounwind {
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: retq
;
; X32-LABEL: test2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addps %xmm1, %xmm0
; X32-NEXT: retl
%Z = fadd <2 x float> %Q, %R
@@ -41,12 +41,12 @@ define <2 x float> @test2(<2 x float> %Q
define <2 x float> @test3(<4 x float> %A) nounwind {
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: retq
;
; X32-LABEL: test3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: retl
%B = shufflevector <4 x float> %A, <4 x float> undef, <2 x i32> <i32 0, i32 1>
@@ -56,12 +56,12 @@ define <2 x float> @test3(<4 x float> %A
define <2 x float> @test4(<2 x float> %A) nounwind {
; X64-LABEL: test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: retq
;
; X32-LABEL: test4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: retl
%C = fadd <2 x float> %A, %A
@@ -70,13 +70,13 @@ define <2 x float> @test4(<2 x float> %A
define <4 x float> @test5(<4 x float> %A) nounwind {
; X64-LABEL: test5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: retq
;
; X32-LABEL: test5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: addps %xmm0, %xmm0
; X32-NEXT: retl
Modified: llvm/trunk/test/CodeGen/X86/v4f32-immediate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/v4f32-immediate.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/v4f32-immediate.ll (original)
+++ llvm/trunk/test/CodeGen/X86/v4f32-immediate.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <4 x float> @foo() {
; X32-LABEL: foo:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [3.223542e+00,2.300000e+00,1.200000e+00,1.000000e-01]
; X32-NEXT: retl
;
; X64-LABEL: foo:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [3.223542e+00,2.300000e+00,1.200000e+00,1.000000e-01]
; X64-NEXT: retq
ret <4 x float> <float 0x4009C9D0A0000000, float 0x4002666660000000, float 0x3FF3333340000000, float 0x3FB99999A0000000>
Modified: llvm/trunk/test/CodeGen/X86/v8i1-masks.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/v8i1-masks.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/v8i1-masks.ll (original)
+++ llvm/trunk/test/CodeGen/X86/v8i1-masks.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; X32-LABEL: and_masks:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -20,7 +20,7 @@ define void @and_masks(<8 x float>* %a,
; X32-NEXT: retl
;
; X64-LABEL: and_masks:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovups (%rdi), %ymm0
; X64-NEXT: vmovups (%rsi), %ymm1
; X64-NEXT: vcmpltps %ymm0, %ymm1, %ymm1
@@ -44,7 +44,7 @@ define void @and_masks(<8 x float>* %a,
define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; X32-LABEL: neg_masks:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovups (%ecx), %ymm0
@@ -55,7 +55,7 @@ define void @neg_masks(<8 x float>* %a,
; X32-NEXT: retl
;
; X64-LABEL: neg_masks:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovups (%rsi), %ymm0
; X64-NEXT: vcmpnltps (%rdi), %ymm0, %ymm0
; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
Modified: llvm/trunk/test/CodeGen/X86/vaargs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vaargs.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vaargs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vaargs.ll Mon Dec 4 09:18:51 2017
@@ -8,7 +8,7 @@ target triple = "x86_64-apple-macosx10.9
define i32 @sum(i32 %count, ...) nounwind optsize ssp uwtable {
; CHECK: testb %al, %al
; CHECK-NEXT: je
-; CHECK-NEXT: ## BB#{{[0-9]+}}:
+; CHECK-NEXT: ## %bb.{{[0-9]+}}:
; CHECK-NEXT: vmovaps %xmm0, 48(%rsp)
; CHECK-NEXT: vmovaps %xmm1, 64(%rsp)
; CHECK-NEXT: vmovaps %xmm2, 80(%rsp)
Modified: llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx-x86.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
; {vaes, avx}
define <4 x i64> @test_x86_aesni_aesenc_256(<4 x i64> %a0, <4 x i64> %a1) {
; VAES_AVX-LABEL: test_x86_aesni_aesenc_256:
-; VAES_AVX: # BB#0:
+; VAES_AVX: # %bb.0:
; VAES_AVX-NEXT: vaesenc %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0xdc,0xc1]
; VAES_AVX-NEXT: retl # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.aesni.aesenc.256(<4 x i64> %a0, <4 x i64> %a1)
Modified: llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx512-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx512-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx512-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx512-x86.ll Mon Dec 4 09:18:51 2017
@@ -2,7 +2,7 @@
define <8 x i64> @test_x86_aesni_aesenc_512(<8 x i64> %a0, <8 x i64> %a1) {
; VAES_AVX512-LABEL: test_x86_aesni_aesenc_512:
-; VAES_AVX512: # BB#0:
+; VAES_AVX512: # %bb.0:
; VAES_AVX512-NEXT: vaesenc %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0xdc,0xc1]
; VAES_AVX512-NEXT: retq # encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.aesni.aesenc.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -12,7 +12,7 @@ declare <8 x i64> @llvm.x86.aesni.aesenc
define <8 x i64> @test_x86_aesni_aesenclast_512(<8 x i64> %a0, <8 x i64> %a1) {
; VAES_AVX512-LABEL: test_x86_aesni_aesenclast_512:
-; VAES_AVX512: # BB#0:
+; VAES_AVX512: # %bb.0:
; VAES_AVX512-NEXT: vaesenclast %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0xdd,0xc1]
; VAES_AVX512-NEXT: retq # encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.aesni.aesenclast.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -22,7 +22,7 @@ declare <8 x i64> @llvm.x86.aesni.aesenc
define <8 x i64> @test_x86_aesni_aesdec_512(<8 x i64> %a0, <8 x i64> %a1) {
; VAES_AVX512-LABEL: test_x86_aesni_aesdec_512:
-; VAES_AVX512: # BB#0:
+; VAES_AVX512: # %bb.0:
; VAES_AVX512-NEXT: vaesdec %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0xde,0xc1]
; VAES_AVX512-NEXT: retq # encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.aesni.aesdec.512(<8 x i64> %a0, <8 x i64> %a1)
@@ -32,7 +32,7 @@ declare <8 x i64> @llvm.x86.aesni.aesdec
define <8 x i64> @test_x86_aesni_aesdeclast_512(<8 x i64> %a0, <8 x i64> %a1) {
; VAES_AVX512-LABEL: test_x86_aesni_aesdeclast_512:
-; VAES_AVX512: # BB#0:
+; VAES_AVX512: # %bb.0:
; VAES_AVX512-NEXT: vaesdeclast %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x48,0xdf,0xc1]
; VAES_AVX512-NEXT: retq # encoding: [0xc3]
%res = call <8 x i64> @llvm.x86.aesni.aesdeclast.512(<8 x i64> %a0, <8 x i64> %a1)
Modified: llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx512vl-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx512vl-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx512vl-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vaes-intrinsics-avx512vl-x86.ll Mon Dec 4 09:18:51 2017
@@ -2,7 +2,7 @@
define <2 x i64> @test_x86_aesni_aesenc(<2 x i64> %a0, <2 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesenc:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesenc %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xdc,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1)
@@ -12,7 +12,7 @@ declare <2 x i64> @llvm.x86.aesni.aesenc
define <4 x i64> @test_x86_aesni_aesenc_256(<4 x i64> %a0, <4 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesenc_256:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesenc %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xdc,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.aesni.aesenc.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -22,7 +22,7 @@ declare <4 x i64> @llvm.x86.aesni.aesenc
define <2 x i64> @test_x86_aesni_aesenclast(<2 x i64> %a0, <2 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesenclast:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesenclast %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xdd,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1)
@@ -32,7 +32,7 @@ declare <2 x i64> @llvm.x86.aesni.aesenc
define <4 x i64> @test_x86_aesni_aesenclast_256(<4 x i64> %a0, <4 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesenclast_256:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesenclast %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xdd,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.aesni.aesenclast.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -42,7 +42,7 @@ declare <4 x i64> @llvm.x86.aesni.aesenc
define <2 x i64> @test_x86_aesni_aesdec(<2 x i64> %a0, <2 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesdec:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesdec %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xde,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1)
@@ -52,7 +52,7 @@ declare <2 x i64> @llvm.x86.aesni.aesdec
define <4 x i64> @test_x86_aesni_aesdec_256(<4 x i64> %a0, <4 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesdec_256:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesdec %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xde,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.aesni.aesdec.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -62,7 +62,7 @@ declare <4 x i64> @llvm.x86.aesni.aesdec
define <2 x i64> @test_x86_aesni_aesdeclast(<2 x i64> %a0, <2 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesdeclast:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesdeclast %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xdf,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1)
@@ -72,7 +72,7 @@ declare <2 x i64> @llvm.x86.aesni.aesdec
define <4 x i64> @test_x86_aesni_aesdeclast_256(<4 x i64> %a0, <4 x i64> %a1) {
; VAES_AVX512VL-LABEL: test_x86_aesni_aesdeclast_256:
-; VAES_AVX512VL: # BB#0:
+; VAES_AVX512VL: # %bb.0:
; VAES_AVX512VL-NEXT: vaesdeclast %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xdf,0xc1]
; VAES_AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.aesni.aesdeclast.256(<4 x i64> %a0, <4 x i64> %a1)
Modified: llvm/trunk/test/CodeGen/X86/var-permute-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/var-permute-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/var-permute-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/var-permute-128.ll Mon Dec 4 09:18:51 2017
@@ -9,7 +9,7 @@
define <2 x i64> @var_shuffle_v2i64(<2 x i64> %v, <2 x i64> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v2i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %xmm1, %rax
; SSSE3-NEXT: andl $1, %eax
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -22,7 +22,7 @@ define <2 x i64> @var_shuffle_v2i64(<2 x
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq %xmm1, %rax
; AVX-NEXT: andl $1, %eax
; AVX-NEXT: vpextrq $1, %xmm1, %rcx
@@ -43,7 +43,7 @@ define <2 x i64> @var_shuffle_v2i64(<2 x
define <4 x i32> @var_shuffle_v4i32(<4 x i32> %v, <4 x i32> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSSE3-NEXT: movq %xmm2, %rax
; SSSE3-NEXT: movq %rax, %rcx
@@ -66,7 +66,7 @@ define <4 x i32> @var_shuffle_v4i32(<4 x
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm1, %rax
; AVX-NEXT: movq %rax, %rcx
; AVX-NEXT: sarq $32, %rcx
@@ -100,7 +100,7 @@ define <4 x i32> @var_shuffle_v4i32(<4 x
define <8 x i16> @var_shuffle_v8i16(<8 x i16> %v, <8 x i16> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movd %xmm1, %r8d
; SSSE3-NEXT: pextrw $1, %xmm1, %r9d
; SSSE3-NEXT: pextrw $2, %xmm1, %r10d
@@ -144,7 +144,7 @@ define <8 x i16> @var_shuffle_v8i16(<8 x
; SSSE3-NEXT: retq
;
; AVXNOVLBW-LABEL: var_shuffle_v8i16:
-; AVXNOVLBW: # BB#0:
+; AVXNOVLBW: # %bb.0:
; AVXNOVLBW-NEXT: vmovd %xmm1, %eax
; AVXNOVLBW-NEXT: vpextrw $1, %xmm1, %r10d
; AVXNOVLBW-NEXT: vpextrw $2, %xmm1, %ecx
@@ -174,7 +174,7 @@ define <8 x i16> @var_shuffle_v8i16(<8 x
; AVXNOVLBW-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v8i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpermw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <8 x i16> %indices, i32 0
@@ -206,13 +206,13 @@ define <8 x i16> @var_shuffle_v8i16(<8 x
define <16 x i8> @var_shuffle_v16i8(<16 x i8> %v, <16 x i8> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb %xmm0, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%index0 = extractelement <16 x i8> %indices, i32 0
@@ -268,7 +268,7 @@ define <16 x i8> @var_shuffle_v16i8(<16
define <2 x double> @var_shuffle_v2f64(<2 x double> %v, <2 x i64> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v2f64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %xmm1, %rax
; SSSE3-NEXT: andl $1, %eax
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -280,7 +280,7 @@ define <2 x double> @var_shuffle_v2f64(<
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovq %xmm1, %rax
; AVX-NEXT: andl $1, %eax
; AVX-NEXT: vpextrq $1, %xmm1, %rcx
@@ -300,7 +300,7 @@ define <2 x double> @var_shuffle_v2f64(<
define <4 x float> @var_shuffle_v4f32(<4 x float> %v, <4 x i32> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v4f32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSSE3-NEXT: movq %xmm2, %rax
; SSSE3-NEXT: movq %rax, %rcx
@@ -323,7 +323,7 @@ define <4 x float> @var_shuffle_v4f32(<4
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm1, %rax
; AVX-NEXT: movq %rax, %rcx
; AVX-NEXT: sarq $32, %rcx
Modified: llvm/trunk/test/CodeGen/X86/var-permute-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/var-permute-256.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/var-permute-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/var-permute-256.ll Mon Dec 4 09:18:51 2017
@@ -8,7 +8,7 @@
define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -35,7 +35,7 @@ define <4 x i64> @var_shuffle_v4i64(<4 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -62,7 +62,7 @@ define <4 x i64> @var_shuffle_v4i64(<4 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_v4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -89,12 +89,12 @@ define <4 x i64> @var_shuffle_v4i64(<4 x
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v4i64:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <4 x i64> %indices, i32 0
@@ -114,7 +114,7 @@ define <4 x i64> @var_shuffle_v4i64(<4 x
define <8 x i32> @var_shuffle_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -156,7 +156,7 @@ define <8 x i32> @var_shuffle_v8i32(<8 x
; AVX1-NEXT: retq
;
; INT256-LABEL: var_shuffle_v8i32:
-; INT256: # BB#0:
+; INT256: # %bb.0:
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: retq
%index0 = extractelement <8 x i32> %indices, i32 0
@@ -188,7 +188,7 @@ define <8 x i32> @var_shuffle_v8i32(<8 x
define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -251,7 +251,7 @@ define <16 x i16> @var_shuffle_v16i16(<1
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -314,7 +314,7 @@ define <16 x i16> @var_shuffle_v16i16(<1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_v16i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -377,7 +377,7 @@ define <16 x i16> @var_shuffle_v16i16(<1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %rbp
; AVX512VL-NEXT: movq %rsp, %rbp
; AVX512VL-NEXT: andq $-32, %rsp
@@ -440,7 +440,7 @@ define <16 x i16> @var_shuffle_v16i16(<1
; AVX512VL-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v16i16:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <16 x i16> %indices, i32 0
@@ -496,7 +496,7 @@ define <16 x i16> @var_shuffle_v16i16(<1
define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -624,7 +624,7 @@ define <32 x i8> @var_shuffle_v32i8(<32
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -752,7 +752,7 @@ define <32 x i8> @var_shuffle_v32i8(<32
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_v32i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -880,7 +880,7 @@ define <32 x i8> @var_shuffle_v32i8(<32
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v32i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %rbp
; AVX512VL-NEXT: movq %rsp, %rbp
; AVX512VL-NEXT: andq $-32, %rsp
@@ -1008,7 +1008,7 @@ define <32 x i8> @var_shuffle_v32i8(<32
; AVX512VL-NEXT: retq
;
; VBMI-LABEL: var_shuffle_v32i8:
-; VBMI: # BB#0:
+; VBMI: # %bb.0:
; VBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
; VBMI-NEXT: retq
%index0 = extractelement <32 x i8> %indices, i32 0
@@ -1112,7 +1112,7 @@ define <32 x i8> @var_shuffle_v32i8(<32
define <4 x double> @var_shuffle_v4f64(<4 x double> %v, <4 x i64> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -1137,7 +1137,7 @@ define <4 x double> @var_shuffle_v4f64(<
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -1162,7 +1162,7 @@ define <4 x double> @var_shuffle_v4f64(<
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_v4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
@@ -1187,12 +1187,12 @@ define <4 x double> @var_shuffle_v4f64(<
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v4f64:
-; AVX512VLBW: # BB#0:
+; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <4 x i64> %indices, i32 0
@@ -1212,7 +1212,7 @@ define <4 x double> @var_shuffle_v4f64(<
define <8 x float> @var_shuffle_v8f32(<8 x float> %v, <8 x i32> %indices) nounwind {
; AVX1-LABEL: var_shuffle_v8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -1254,7 +1254,7 @@ define <8 x float> @var_shuffle_v8f32(<8
; AVX1-NEXT: retq
;
; INT256-LABEL: var_shuffle_v8f32:
-; INT256: # BB#0:
+; INT256: # %bb.0:
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: retq
%index0 = extractelement <8 x i32> %indices, i32 0
Modified: llvm/trunk/test/CodeGen/X86/var-permute-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/var-permute-512.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/var-permute-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/var-permute-512.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@
define <8 x i64> @var_shuffle_v8i64(<8 x i64> %v, <8 x i64> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v8i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <8 x i64> %indices, i32 0
@@ -37,7 +37,7 @@ define <8 x i64> @var_shuffle_v8i64(<8 x
define <16 x i32> @var_shuffle_v16i32(<16 x i32> %v, <16 x i32> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <16 x i32> %indices, i32 0
@@ -93,7 +93,7 @@ define <16 x i32> @var_shuffle_v16i32(<1
define <32 x i16> @var_shuffle_v32i16(<32 x i16> %v, <32 x i16> %indices) nounwind {
; NOBW-LABEL: var_shuffle_v32i16:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: pushq %rbp
; NOBW-NEXT: movq %rsp, %rbp
; NOBW-NEXT: andq $-64, %rsp
@@ -271,7 +271,7 @@ define <32 x i16> @var_shuffle_v32i16(<3
; NOBW-NEXT: retq
;
; AVX512BW-LABEL: var_shuffle_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%index0 = extractelement <32 x i16> %indices, i32 0
@@ -375,7 +375,7 @@ define <32 x i16> @var_shuffle_v32i16(<3
define <64 x i8> @var_shuffle_v64i8(<64 x i8> %v, <64 x i8> %indices) nounwind {
; NOBW-LABEL: var_shuffle_v64i8:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: pushq %rbp
; NOBW-NEXT: movq %rsp, %rbp
; NOBW-NEXT: andq $-64, %rsp
@@ -777,7 +777,7 @@ define <64 x i8> @var_shuffle_v64i8(<64
; NOBW-NEXT: retq
;
; VBMI-LABEL: var_shuffle_v64i8:
-; VBMI: # BB#0:
+; VBMI: # %bb.0:
; VBMI-NEXT: vpermb %zmm0, %zmm1, %zmm0
; VBMI-NEXT: retq
%index0 = extractelement <64 x i8> %indices, i32 0
@@ -977,7 +977,7 @@ define <64 x i8> @var_shuffle_v64i8(<64
define <8 x double> @var_shuffle_v8f64(<8 x double> %v, <8 x i64> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v8f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <8 x i64> %indices, i32 0
@@ -1009,7 +1009,7 @@ define <8 x double> @var_shuffle_v8f64(<
define <16 x float> @var_shuffle_v16f32(<16 x float> %v, <16 x i32> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v16f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <16 x i32> %indices, i32 0
Modified: llvm/trunk/test/CodeGen/X86/vec-copysign-avx512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec-copysign-avx512.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec-copysign-avx512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec-copysign-avx512.ll Mon Dec 4 09:18:51 2017
@@ -4,14 +4,14 @@
define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
; AVX512VL-LABEL: v4f32:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm1
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512VL-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v4f32:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to4}, %xmm1, %xmm1
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; AVX512VLDQ-NEXT: vorps %xmm1, %xmm0, %xmm0
@@ -22,14 +22,14 @@ define <4 x float> @v4f32(<4 x float> %a
define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
; AVX512VL-LABEL: v8f32:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to8}, %ymm1, %ymm1
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v8f32:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to8}, %ymm1, %ymm1
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to8}, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vorps %ymm1, %ymm0, %ymm0
@@ -40,14 +40,14 @@ define <8 x float> @v8f32(<8 x float> %a
define <16 x float> @v16f32(<16 x float> %a, <16 x float> %b) nounwind {
; AVX512VL-LABEL: v16f32:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm1, %zmm1
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v16f32:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm1, %zmm1
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VLDQ-NEXT: vorps %zmm1, %zmm0, %zmm0
@@ -58,14 +58,14 @@ define <16 x float> @v16f32(<16 x float>
define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind {
; AVX512VL-LABEL: v2f64:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v2f64:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
; AVX512VLDQ-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX512VLDQ-NEXT: vorps %xmm1, %xmm0, %xmm0
@@ -76,14 +76,14 @@ define <2 x double> @v2f64(<2 x double>
define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
; AVX512VL-LABEL: v4f64:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to4}, %ymm1, %ymm1
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to4}, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v4f64:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to4}, %ymm1, %ymm1
; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vorpd %ymm1, %ymm0, %ymm0
@@ -94,14 +94,14 @@ define <4 x double> @v4f64(<4 x double>
define <8 x double> @v8f64(<8 x double> %a, <8 x double> %b) nounwind {
; AVX512VL-LABEL: v8f64:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm1, %zmm1
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512VLDQ-LABEL: v8f64:
-; AVX512VLDQ: ## BB#0:
+; AVX512VLDQ: ## %bb.0:
; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm1, %zmm1
; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VLDQ-NEXT: vorpd %zmm1, %zmm0, %zmm0
Modified: llvm/trunk/test/CodeGen/X86/vec-copysign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec-copysign.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec-copysign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec-copysign.ll Mon Dec 4 09:18:51 2017
@@ -18,14 +18,14 @@
define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
; SSE2-LABEL: v4f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps [[SIGNMASK1]](%rip), %xmm1
; SSE2-NEXT: andps [[MAGMASK1]](%rip), %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps [[SIGNMASK1]](%rip), %xmm1, %xmm1
; AVX-NEXT: vandps [[MAGMASK1]](%rip), %xmm0, %xmm0
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
@@ -69,7 +69,7 @@ define <4 x float> @v4f32(<4 x float> %a
define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
; SSE2-LABEL: v8f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps [[SIGNMASK2]](%rip), %xmm4
; SSE2-NEXT: andps %xmm4, %xmm2
; SSE2-NEXT: movaps [[MAGMASK2]](%rip), %xmm5
@@ -81,7 +81,7 @@ define <8 x float> @v8f32(<8 x float> %a
; SSE2-NEXT: retq
;
; AVX-LABEL: v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps [[SIGNMASK2]](%rip), %ymm1, %ymm1
; AVX-NEXT: vandps [[MAGMASK2]](%rip), %ymm0, %ymm0
; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
@@ -101,14 +101,14 @@ define <8 x float> @v8f32(<8 x float> %a
define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind {
; SSE2-LABEL: v2f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: andps [[SIGNMASK3]](%rip), %xmm1
; SSE2-NEXT: andps [[MAGMASK3]](%rip), %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps [[SIGNMASK3]](%rip), %xmm1, %xmm1
; AVX-NEXT: vandps [[MAGMASK3]](%rip), %xmm0, %xmm0
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
@@ -140,7 +140,7 @@ define <2 x double> @v2f64(<2 x double>
define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
; SSE2-LABEL: v4f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps [[SIGNMASK4]](%rip), %xmm4
; SSE2-NEXT: andps %xmm4, %xmm2
; SSE2-NEXT: movaps [[MAGMASK4]](%rip), %xmm5
@@ -152,7 +152,7 @@ define <4 x double> @v4f64(<4 x double>
; SSE2-NEXT: retq
;
; AVX-LABEL: v4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vandps [[SIGNMASK4]](%rip), %ymm1, %ymm1
; AVX-NEXT: vandps [[MAGMASK4]](%rip), %ymm0, %ymm0
; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
Modified: llvm/trunk/test/CodeGen/X86/vec-trunc-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec-trunc-store.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec-trunc-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec-trunc-store.ll Mon Dec 4 09:18:51 2017
@@ -3,7 +3,7 @@
define void @foo(<8 x i32>* %p) nounwind {
; CHECK-LABEL: foo:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movdqa (%rdi), %xmm0
; CHECK-NEXT: movdqa 16(%rdi), %xmm1
; CHECK-NEXT: pslld $16, %xmm1
@@ -21,7 +21,7 @@ define void @foo(<8 x i32>* %p) nounwind
define void @bar(<4 x i32>* %p) nounwind {
; CHECK-LABEL: bar:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
Modified: llvm/trunk/test/CodeGen/X86/vec3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec3.ll Mon Dec 4 09:18:51 2017
@@ -3,7 +3,7 @@
define <3 x float> @fadd(<3 x float> %v, float %d) {
; CHECK-LABEL: fadd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,3]
; CHECK-NEXT: addps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -17,7 +17,7 @@ define <3 x float> @fadd(<3 x float> %v,
define <3 x float> @fdiv(<3 x float> %v, float %d) {
; CHECK-LABEL: fdiv:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,3]
; CHECK-NEXT: divps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vec_cast2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_cast2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_cast2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_cast2.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define <8 x float> @foo1_8(<8 x i8> %src) {
; CHECK-LABEL: foo1_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: vpslld $24, %xmm0, %xmm0
@@ -16,7 +16,7 @@ define <8 x float> @foo1_8(<8 x i8> %src
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo1_8:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vpmovsxbd %xmm0, %xmm1
; CHECK-WIDE-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-WIDE-NEXT: vpmovsxbd %xmm0, %xmm0
@@ -29,14 +29,14 @@ define <8 x float> @foo1_8(<8 x i8> %src
define <4 x float> @foo1_4(<4 x i8> %src) {
; CHECK-LABEL: foo1_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld $24, %xmm0, %xmm0
; CHECK-NEXT: vpsrad $24, %xmm0, %xmm0
; CHECK-NEXT: vcvtdq2ps %xmm0, %xmm0
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo1_4:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vpmovsxbd %xmm0, %xmm0
; CHECK-WIDE-NEXT: vcvtdq2ps %xmm0, %xmm0
; CHECK-WIDE-NEXT: retl
@@ -46,7 +46,7 @@ define <4 x float> @foo1_4(<4 x i8> %src
define <8 x float> @foo2_8(<8 x i8> %src) {
; CHECK-LABEL: foo2_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpand LCPI2_0, %xmm0, %xmm0
; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -56,7 +56,7 @@ define <8 x float> @foo2_8(<8 x i8> %src
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo2_8:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-WIDE-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-WIDE-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -69,13 +69,13 @@ define <8 x float> @foo2_8(<8 x i8> %src
define <4 x float> @foo2_4(<4 x i8> %src) {
; CHECK-LABEL: foo2_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vandps LCPI3_0, %xmm0, %xmm0
; CHECK-NEXT: vcvtdq2ps %xmm0, %xmm0
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo2_4:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-WIDE-NEXT: vcvtdq2ps %xmm0, %xmm0
; CHECK-WIDE-NEXT: retl
@@ -85,7 +85,7 @@ define <4 x float> @foo2_4(<4 x i8> %src
define <8 x i8> @foo3_8(<8 x float> %src) {
; CHECK-LABEL: foo3_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -93,7 +93,7 @@ define <8 x i8> @foo3_8(<8 x float> %src
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo3_8:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-WIDE-NEXT: vcvttss2si %xmm1, %eax
; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %ecx
@@ -125,12 +125,12 @@ define <8 x i8> @foo3_8(<8 x float> %src
define <4 x i8> @foo3_4(<4 x float> %src) {
; CHECK-LABEL: foo3_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttps2dq %xmm0, %xmm0
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: foo3_4:
-; CHECK-WIDE: ## BB#0:
+; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-WIDE-NEXT: vcvttss2si %xmm1, %eax
; CHECK-WIDE-NEXT: vcvttss2si %xmm0, %ecx
Modified: llvm/trunk/test/CodeGen/X86/vec_cmp_sint-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_cmp_sint-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_cmp_sint-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_cmp_sint-128.ll Mon Dec 4 09:18:51 2017
@@ -15,29 +15,29 @@
define <2 x i64> @eq_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: eq_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: eq_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: eq_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: eq_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <2 x i64> %a, %b
@@ -47,17 +47,17 @@ define <2 x i64> @eq_v2i64(<2 x i64> %a,
define <4 x i32> @eq_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: eq_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <4 x i32> %a, %b
@@ -67,17 +67,17 @@ define <4 x i32> @eq_v4i32(<4 x i32> %a,
define <8 x i16> @eq_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: eq_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <8 x i16> %a, %b
@@ -87,17 +87,17 @@ define <8 x i16> @eq_v8i16(<8 x i16> %a,
define <16 x i8> @eq_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: eq_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <16 x i8> %a, %b
@@ -111,7 +111,7 @@ define <16 x i8> @eq_v16i8(<16 x i8> %a,
define <2 x i64> @ne_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: ne_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm0
@@ -120,28 +120,28 @@ define <2 x i64> @ne_v2i64(<2 x i64> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: ne_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: ne_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
; SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; SSE42-NEXT: pxor %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: ne_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <2 x i64> %a, %b
@@ -151,21 +151,21 @@ define <2 x i64> @ne_v2i64(<2 x i64> %a,
define <4 x i32> @ne_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: ne_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <4 x i32> %a, %b
@@ -175,21 +175,21 @@ define <4 x i32> @ne_v4i32(<4 x i32> %a,
define <8 x i16> @ne_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: ne_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <8 x i16> %a, %b
@@ -199,21 +199,21 @@ define <8 x i16> @ne_v8i16(<8 x i16> %a,
define <16 x i8> @ne_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: ne_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <16 x i8> %a, %b
@@ -227,7 +227,7 @@ define <16 x i8> @ne_v16i8(<16 x i8> %a,
define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: ge_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm1
@@ -244,7 +244,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: ge_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm1
@@ -261,21 +261,21 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a,
; SSE41-NEXT: retq
;
; SSE42-LABEL: ge_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm0, %xmm1
; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
; SSE42-NEXT: pxor %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: ge_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sge <2 x i64> %a, %b
@@ -285,21 +285,21 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a,
define <4 x i32> @ge_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: ge_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm0, %xmm1
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ge_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomged %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sge <4 x i32> %a, %b
@@ -309,21 +309,21 @@ define <4 x i32> @ge_v4i32(<4 x i32> %a,
define <8 x i16> @ge_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: ge_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm0, %xmm1
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ge_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgew %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sge <8 x i16> %a, %b
@@ -333,21 +333,21 @@ define <8 x i16> @ge_v8i16(<8 x i16> %a,
define <16 x i8> @ge_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: ge_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm0, %xmm1
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ge_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sge <16 x i8> %a, %b
@@ -361,7 +361,7 @@ define <16 x i8> @ge_v16i8(<16 x i8> %a,
define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: gt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -376,7 +376,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: gt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm0
@@ -391,17 +391,17 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a,
; SSE41-NEXT: retq
;
; SSE42-LABEL: gt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: gt_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sgt <2 x i64> %a, %b
@@ -411,17 +411,17 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a,
define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: gt_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: gt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sgt <4 x i32> %a, %b
@@ -431,17 +431,17 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a,
define <8 x i16> @gt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: gt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: gt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sgt <8 x i16> %a, %b
@@ -451,17 +451,17 @@ define <8 x i16> @gt_v8i16(<8 x i16> %a,
define <16 x i8> @gt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: gt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: gt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sgt <16 x i8> %a, %b
@@ -475,7 +475,7 @@ define <16 x i8> @gt_v16i8(<16 x i8> %a,
define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: le_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -492,7 +492,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: le_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm0
@@ -509,21 +509,21 @@ define <2 x i64> @le_v2i64(<2 x i64> %a,
; SSE41-NEXT: retq
;
; SSE42-LABEL: le_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; SSE42-NEXT: pxor %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: le_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sle <2 x i64> %a, %b
@@ -533,21 +533,21 @@ define <2 x i64> @le_v2i64(<2 x i64> %a,
define <4 x i32> @le_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: le_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: le_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomled %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sle <4 x i32> %a, %b
@@ -557,21 +557,21 @@ define <4 x i32> @le_v4i32(<4 x i32> %a,
define <8 x i16> @le_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: le_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: le_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomlew %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sle <8 x i16> %a, %b
@@ -581,21 +581,21 @@ define <8 x i16> @le_v8i16(<8 x i16> %a,
define <16 x i8> @le_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: le_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: le_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp sle <16 x i8> %a, %b
@@ -609,7 +609,7 @@ define <16 x i8> @le_v16i8(<16 x i8> %a,
define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: lt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm1
@@ -624,7 +624,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: lt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm1
@@ -639,18 +639,18 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a,
; SSE41-NEXT: retq
;
; SSE42-LABEL: lt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm0, %xmm1
; SSE42-NEXT: movdqa %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: lt_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp slt <2 x i64> %a, %b
@@ -660,18 +660,18 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a,
define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: lt_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: lt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp slt <4 x i32> %a, %b
@@ -681,18 +681,18 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a,
define <8 x i16> @lt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: lt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: lt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp slt <8 x i16> %a, %b
@@ -702,18 +702,18 @@ define <8 x i16> @lt_v8i16(<8 x i16> %a,
define <16 x i8> @lt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: lt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: lt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp slt <16 x i8> %a, %b
Modified: llvm/trunk/test/CodeGen/X86/vec_cmp_uint-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_cmp_uint-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_cmp_uint-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_cmp_uint-128.ll Mon Dec 4 09:18:51 2017
@@ -15,29 +15,29 @@
define <2 x i64> @eq_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: eq_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: eq_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: eq_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: eq_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <2 x i64> %a, %b
@@ -47,17 +47,17 @@ define <2 x i64> @eq_v2i64(<2 x i64> %a,
define <4 x i32> @eq_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: eq_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <4 x i32> %a, %b
@@ -67,17 +67,17 @@ define <4 x i32> @eq_v4i32(<4 x i32> %a,
define <8 x i16> @eq_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: eq_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <8 x i16> %a, %b
@@ -87,17 +87,17 @@ define <8 x i16> @eq_v8i16(<8 x i16> %a,
define <16 x i8> @eq_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: eq_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: eq_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: eq_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomeqb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp eq <16 x i8> %a, %b
@@ -111,7 +111,7 @@ define <16 x i8> @eq_v16i8(<16 x i8> %a,
define <2 x i64> @ne_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: ne_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm0
@@ -120,28 +120,28 @@ define <2 x i64> @ne_v2i64(<2 x i64> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: ne_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: ne_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
; SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; SSE42-NEXT: pxor %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: ne_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <2 x i64> %a, %b
@@ -151,21 +151,21 @@ define <2 x i64> @ne_v2i64(<2 x i64> %a,
define <4 x i32> @ne_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: ne_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <4 x i32> %a, %b
@@ -175,21 +175,21 @@ define <4 x i32> @ne_v4i32(<4 x i32> %a,
define <8 x i16> @ne_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: ne_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <8 x i16> %a, %b
@@ -199,21 +199,21 @@ define <8 x i16> @ne_v8i16(<8 x i16> %a,
define <16 x i8> @ne_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: ne_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ne_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ne_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomneqb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ne <16 x i8> %a, %b
@@ -227,7 +227,7 @@ define <16 x i8> @ne_v16i8(<16 x i8> %a,
define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: ge_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm1
@@ -244,7 +244,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: ge_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm1
@@ -261,7 +261,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a,
; SSE41-NEXT: retq
;
; SSE42-LABEL: ge_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: pxor %xmm2, %xmm0
; SSE42-NEXT: pxor %xmm1, %xmm2
@@ -271,7 +271,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a,
; SSE42-NEXT: retq
;
; AVX1-LABEL: ge_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -281,7 +281,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a,
; AVX1-NEXT: retq
;
; AVX2-LABEL: ge_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -291,12 +291,12 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a,
; AVX2-NEXT: retq
;
; XOP-LABEL: ge_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeuq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: ge_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
@@ -310,7 +310,7 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a,
define <4 x i32> @ge_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: ge_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm2
@@ -320,25 +320,25 @@ define <4 x i32> @ge_v4i32(<4 x i32> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: ge_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: ge_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxud %xmm0, %xmm1
; SSE42-NEXT: pcmpeqd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: ge_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeud %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp uge <4 x i32> %a, %b
@@ -348,32 +348,32 @@ define <4 x i32> @ge_v4i32(<4 x i32> %a,
define <8 x i16> @ge_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: ge_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psubusw %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ge_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: ge_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxuw %xmm0, %xmm1
; SSE42-NEXT: pcmpeqw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: ge_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeuw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp uge <8 x i16> %a, %b
@@ -383,19 +383,19 @@ define <8 x i16> @ge_v8i16(<8 x i16> %a,
define <16 x i8> @ge_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: ge_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm0, %xmm1
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ge_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: ge_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgeub %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp uge <16 x i8> %a, %b
@@ -409,7 +409,7 @@ define <16 x i8> @ge_v16i8(<16 x i8> %a,
define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: gt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -424,7 +424,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: gt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm0
@@ -439,7 +439,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a,
; SSE41-NEXT: retq
;
; SSE42-LABEL: gt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: pxor %xmm2, %xmm1
; SSE42-NEXT: pxor %xmm2, %xmm0
@@ -447,7 +447,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a,
; SSE42-NEXT: retq
;
; AVX-LABEL: gt_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -455,7 +455,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a,
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ugt <2 x i64> %a, %b
@@ -465,7 +465,7 @@ define <2 x i64> @gt_v2i64(<2 x i64> %a,
define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: gt_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm0
@@ -473,7 +473,7 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a,
; SSE-NEXT: retq
;
; AVX1-LABEL: gt_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -481,7 +481,7 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a,
; AVX1-NEXT: retq
;
; AVX2-LABEL: gt_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -489,12 +489,12 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a,
; AVX2-NEXT: retq
;
; XOP-LABEL: gt_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtud %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: gt_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -507,7 +507,7 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a,
define <8 x i16> @gt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: gt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm0
@@ -515,7 +515,7 @@ define <8 x i16> @gt_v8i16(<8 x i16> %a,
; SSE-NEXT: retq
;
; AVX-LABEL: gt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -523,7 +523,7 @@ define <8 x i16> @gt_v8i16(<8 x i16> %a,
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ugt <8 x i16> %a, %b
@@ -533,7 +533,7 @@ define <8 x i16> @gt_v8i16(<8 x i16> %a,
define <16 x i8> @gt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: gt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm0
@@ -541,7 +541,7 @@ define <16 x i8> @gt_v16i8(<16 x i8> %a,
; SSE-NEXT: retq
;
; AVX-LABEL: gt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -549,7 +549,7 @@ define <16 x i8> @gt_v16i8(<16 x i8> %a,
; AVX-NEXT: retq
;
; XOP-LABEL: gt_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomgtub %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ugt <16 x i8> %a, %b
@@ -563,7 +563,7 @@ define <16 x i8> @gt_v16i8(<16 x i8> %a,
define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: le_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -580,7 +580,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: le_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm0
@@ -597,7 +597,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a,
; SSE41-NEXT: retq
;
; SSE42-LABEL: le_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: pxor %xmm2, %xmm1
; SSE42-NEXT: pxor %xmm2, %xmm0
@@ -607,7 +607,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a,
; SSE42-NEXT: retq
;
; AVX1-LABEL: le_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -617,7 +617,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a,
; AVX1-NEXT: retq
;
; AVX2-LABEL: le_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -627,12 +627,12 @@ define <2 x i64> @le_v2i64(<2 x i64> %a,
; AVX2-NEXT: retq
;
; XOP-LABEL: le_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleuq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: le_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm1
@@ -646,7 +646,7 @@ define <2 x i64> @le_v2i64(<2 x i64> %a,
define <4 x i32> @le_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: le_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -656,25 +656,25 @@ define <4 x i32> @le_v4i32(<4 x i32> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: le_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: le_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminud %xmm0, %xmm1
; SSE42-NEXT: pcmpeqd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: le_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleud %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ule <4 x i32> %a, %b
@@ -684,32 +684,32 @@ define <4 x i32> @le_v4i32(<4 x i32> %a,
define <8 x i16> @le_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: le_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psubusw %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: le_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: le_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminuw %xmm0, %xmm1
; SSE42-NEXT: pcmpeqw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: le_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleuw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ule <8 x i16> %a, %b
@@ -719,19 +719,19 @@ define <8 x i16> @le_v8i16(<8 x i16> %a,
define <16 x i8> @le_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: le_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm0, %xmm1
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: le_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: le_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomleub %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ule <16 x i8> %a, %b
@@ -745,7 +745,7 @@ define <16 x i8> @le_v16i8(<16 x i8> %a,
define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: lt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm1
@@ -760,7 +760,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a,
; SSE2-NEXT: retq
;
; SSE41-LABEL: lt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: pxor %xmm2, %xmm1
@@ -775,7 +775,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a,
; SSE41-NEXT: retq
;
; SSE42-LABEL: lt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: pxor %xmm2, %xmm0
; SSE42-NEXT: pxor %xmm1, %xmm2
@@ -784,7 +784,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a,
; SSE42-NEXT: retq
;
; AVX-LABEL: lt_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -792,7 +792,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a,
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltuq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ult <2 x i64> %a, %b
@@ -802,7 +802,7 @@ define <2 x i64> @lt_v2i64(<2 x i64> %a,
define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: lt_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm2
@@ -811,7 +811,7 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a,
; SSE-NEXT: retq
;
; AVX1-LABEL: lt_v4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -819,7 +819,7 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a,
; AVX1-NEXT: retq
;
; AVX2-LABEL: lt_v4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -827,12 +827,12 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a,
; AVX2-NEXT: retq
;
; XOP-LABEL: lt_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltud %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512-LABEL: lt_v4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -845,7 +845,7 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a,
define <8 x i16> @lt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: lt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm2
@@ -854,7 +854,7 @@ define <8 x i16> @lt_v8i16(<8 x i16> %a,
; SSE-NEXT: retq
;
; AVX-LABEL: lt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -862,7 +862,7 @@ define <8 x i16> @lt_v8i16(<8 x i16> %a,
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltuw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ult <8 x i16> %a, %b
@@ -872,7 +872,7 @@ define <8 x i16> @lt_v8i16(<8 x i16> %a,
define <16 x i8> @lt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: lt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm2
@@ -881,7 +881,7 @@ define <16 x i8> @lt_v16i8(<16 x i8> %a,
; SSE-NEXT: retq
;
; AVX-LABEL: lt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
@@ -889,7 +889,7 @@ define <16 x i8> @lt_v16i8(<16 x i8> %a,
; AVX-NEXT: retq
;
; XOP-LABEL: lt_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpcomltub %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
%1 = icmp ult <16 x i8> %a, %b
Modified: llvm/trunk/test/CodeGen/X86/vec_compare-sse4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_compare-sse4.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_compare-sse4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_compare-sse4.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@
define <2 x i64> @test1(<2 x i64> %A, <2 x i64> %B) nounwind {
; SSE2-LABEL: test1:
-; SSE2: ## BB#0:
+; SSE2: ## %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -20,7 +20,7 @@ define <2 x i64> @test1(<2 x i64> %A, <2
; SSE2-NEXT: retl
;
; SSE41-LABEL: test1:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm0
@@ -35,7 +35,7 @@ define <2 x i64> @test1(<2 x i64> %A, <2
; SSE41-NEXT: retl
;
; SSE42-LABEL: test1:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: retl
%C = icmp sgt <2 x i64> %A, %B
@@ -45,19 +45,19 @@ define <2 x i64> @test1(<2 x i64> %A, <2
define <2 x i64> @test2(<2 x i64> %A, <2 x i64> %B) nounwind {
; SSE2-LABEL: test2:
-; SSE2: ## BB#0:
+; SSE2: ## %bb.0:
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: retl
;
; SSE41-LABEL: test2:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
; SSE41-NEXT: retl
;
; SSE42-LABEL: test2:
-; SSE42: ## BB#0:
+; SSE42: ## %bb.0:
; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
; SSE42-NEXT: retl
%C = icmp eq <2 x i64> %A, %B
Modified: llvm/trunk/test/CodeGen/X86/vec_ctbits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_ctbits.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_ctbits.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_ctbits.ll Mon Dec 4 09:18:51 2017
@@ -7,7 +7,7 @@ declare <2 x i64> @llvm.ctpop.v2i64(<2 x
define <2 x i64> @footz(<2 x i64> %a) nounwind {
; CHECK-LABEL: footz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pxor %xmm2, %xmm2
; CHECK-NEXT: psubq %xmm0, %xmm2
@@ -36,7 +36,7 @@ define <2 x i64> @footz(<2 x i64> %a) no
}
define <2 x i64> @foolz(<2 x i64> %a) nounwind {
; CHECK-LABEL: foolz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: psrlq $1, %xmm1
; CHECK-NEXT: por %xmm0, %xmm1
@@ -81,7 +81,7 @@ define <2 x i64> @foolz(<2 x i64> %a) no
define <2 x i64> @foopop(<2 x i64> %a) nounwind {
; CHECK-LABEL: foopop:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: psrlq $1, %xmm1
; CHECK-NEXT: pand {{.*}}(%rip), %xmm1
@@ -110,7 +110,7 @@ declare <2 x i32> @llvm.ctpop.v2i32(<2 x
define <2 x i32> @promtz(<2 x i32> %a) nounwind {
; CHECK-LABEL: promtz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: por {{.*}}(%rip), %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pxor %xmm2, %xmm2
@@ -140,7 +140,7 @@ define <2 x i32> @promtz(<2 x i32> %a) n
}
define <2 x i32> @promlz(<2 x i32> %a) nounwind {
; CHECK-LABEL: promlz:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: movdqa %xmm0, %xmm2
@@ -187,7 +187,7 @@ define <2 x i32> @promlz(<2 x i32> %a) n
define <2 x i32> @prompop(<2 x i32> %a) nounwind {
; CHECK-LABEL: prompop:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: pxor %xmm2, %xmm2
; CHECK-NEXT: movdqa %xmm0, %xmm1
Modified: llvm/trunk/test/CodeGen/X86/vec_ext_inreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_ext_inreg.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_ext_inreg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_ext_inreg.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@
define <8 x i32> @a(<8 x i32> %a) nounwind {
; SSE-LABEL: a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: pslld $16, %xmm1
@@ -13,7 +13,7 @@ define <8 x i32> @a(<8 x i32> %a) nounwi
; SSE-NEXT: retq
;
; AVX1-LABEL: a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $16, %xmm0, %xmm1
; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -23,7 +23,7 @@ define <8 x i32> @a(<8 x i32> %a) nounwi
; AVX1-NEXT: retq
;
; AVX2-LABEL: a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpslld $16, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -34,13 +34,13 @@ define <8 x i32> @a(<8 x i32> %a) nounwi
define <3 x i32> @b(<3 x i32> %a) nounwind {
; SSE-LABEL: b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpslld $16, %xmm0, %xmm0
; AVX-NEXT: vpsrad $16, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -51,7 +51,7 @@ define <3 x i32> @b(<3 x i32> %a) nounwi
define <1 x i32> @c(<1 x i32> %a) nounwind {
; ALL-LABEL: c:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl %di, %eax
; ALL-NEXT: retq
%b = trunc <1 x i32> %a to <1 x i16>
@@ -61,19 +61,19 @@ define <1 x i32> @c(<1 x i32> %a) nounwi
define <8 x i32> @d(<8 x i32> %a) nounwind {
; SSE-LABEL: d:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: d:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: d:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: retq
@@ -84,12 +84,12 @@ define <8 x i32> @d(<8 x i32> %a) nounwi
define <3 x i32> @e(<3 x i32> %a) nounwind {
; SSE-LABEL: e:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: e:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6,7]
; AVX-NEXT: retq
@@ -100,7 +100,7 @@ define <3 x i32> @e(<3 x i32> %a) nounwi
define <1 x i32> @f(<1 x i32> %a) nounwind {
; ALL-LABEL: f:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movzwl %di, %eax
; ALL-NEXT: retq
%b = trunc <1 x i32> %a to <1 x i16>
Modified: llvm/trunk/test/CodeGen/X86/vec_extract-avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_extract-avx.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_extract-avx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_extract-avx.ll Mon Dec 4 09:18:51 2017
@@ -10,14 +10,14 @@
; Extracting the low elements only requires using the right kind of store.
define void @low_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; X32-LABEL: low_v8f32_to_v4f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovaps %xmm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: low_v8f32_to_v4f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -36,14 +36,14 @@ define void @low_v8f32_to_v4f32(<8 x flo
; Extracting the high elements requires just one AVX instruction.
define void @high_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; X32-LABEL: high_v8f32_to_v4f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vextractf128 $1, %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: high_v8f32_to_v4f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -64,14 +64,14 @@ define void @high_v8f32_to_v4f32(<8 x fl
; have AVX2, we should generate vextracti128 (the int version).
define void @high_v8i32_to_v4i32(<8 x i32> %v, <4 x i32>* %ptr) {
; X32-LABEL: high_v8i32_to_v4i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vextractf128 $1, %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: high_v8i32_to_v4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -90,14 +90,14 @@ define void @high_v8i32_to_v4i32(<8 x i3
; Make sure that element size doesn't alter the codegen.
define void @high_v4f64_to_v2f64(<4 x double> %v, <2 x double>* %ptr) {
; X32-LABEL: high_v4f64_to_v2f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vextractf128 $1, %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: high_v4f64_to_v2f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -114,7 +114,7 @@ define void @high_v4f64_to_v2f64(<4 x do
define void @legal_vzmovl_2i32_8i32(<2 x i32>* %in, <8 x i32>* %out) {
; X32-LABEL: legal_vzmovl_2i32_8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -125,7 +125,7 @@ define void @legal_vzmovl_2i32_8i32(<2 x
; X32-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2i32_8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
@@ -141,7 +141,7 @@ define void @legal_vzmovl_2i32_8i32(<2 x
define void @legal_vzmovl_2i64_4i64(<2 x i64>* %in, <4 x i64>* %out) {
; X32-LABEL: legal_vzmovl_2i64_4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovupd (%ecx), %xmm0
@@ -152,7 +152,7 @@ define void @legal_vzmovl_2i64_4i64(<2 x
; X32-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2i64_4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovupd (%rdi), %xmm0
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
@@ -168,7 +168,7 @@ define void @legal_vzmovl_2i64_4i64(<2 x
define void @legal_vzmovl_2f32_8f32(<2 x float>* %in, <8 x float>* %out) {
; X32-LABEL: legal_vzmovl_2f32_8f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -177,7 +177,7 @@ define void @legal_vzmovl_2f32_8f32(<2 x
; X32-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2f32_8f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
@@ -193,7 +193,7 @@ define void @legal_vzmovl_2f32_8f32(<2 x
define void @legal_vzmovl_2f64_4f64(<2 x double>* %in, <4 x double>* %out) {
; X32-LABEL: legal_vzmovl_2f64_4f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovupd (%ecx), %xmm0
@@ -204,7 +204,7 @@ define void @legal_vzmovl_2f64_4f64(<2 x
; X32-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2f64_4f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovupd (%rdi), %xmm0
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
Modified: llvm/trunk/test/CodeGen/X86/vec_extract-mmx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_extract-mmx.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_extract-mmx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_extract-mmx.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define i32 @test0(<1 x i64>* %v4) nounwind {
; X32-LABEL: test0:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-8, %esp
@@ -22,7 +22,7 @@ define i32 @test0(<1 x i64>* %v4) nounwi
; X32-NEXT: retl
;
; X64-LABEL: test0:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pshufw $238, (%rdi), %mm0 # mm0 = mem[2,3,2,3]
; X64-NEXT: movd %mm0, %eax
; X64-NEXT: addl $32, %eax
@@ -43,7 +43,7 @@ entry:
define i32 @test1(i32* nocapture readonly %ptr) nounwind {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd (%eax), %mm0
; X32-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
@@ -52,7 +52,7 @@ define i32 @test1(i32* nocapture readonl
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd (%rdi), %mm0
; X64-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
; X64-NEXT: movd %mm0, %eax
@@ -78,7 +78,7 @@ entry:
define i32 @test2(i32* nocapture readonly %ptr) nounwind {
; X32-LABEL: test2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: pshufw $232, (%eax), %mm0 # mm0 = mem[0,2,2,3]
; X32-NEXT: movd %mm0, %eax
@@ -86,7 +86,7 @@ define i32 @test2(i32* nocapture readonl
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pshufw $232, (%rdi), %mm0 # mm0 = mem[0,2,2,3]
; X64-NEXT: movd %mm0, %eax
; X64-NEXT: emms
@@ -106,12 +106,12 @@ entry:
define i32 @test3(x86_mmx %a) nounwind {
; X32-LABEL: test3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movd %mm0, %eax
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %mm0, %eax
; X64-NEXT: retq
%tmp0 = bitcast x86_mmx %a to <2 x i32>
@@ -122,7 +122,7 @@ define i32 @test3(x86_mmx %a) nounwind {
; Verify we don't muck with extractelts from the upper lane.
define i32 @test4(x86_mmx %a) nounwind {
; X32-LABEL: test4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-8, %esp
@@ -136,7 +136,7 @@ define i32 @test4(x86_mmx %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,0,1]
Modified: llvm/trunk/test/CodeGen/X86/vec_extract-sse4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_extract-sse4.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_extract-sse4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_extract-sse4.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define void @t1(float* %R, <4 x float>* %P1) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -12,7 +12,7 @@ define void @t1(float* %R, <4 x float>*
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movss %xmm0, (%rdi)
; X64-NEXT: retq
@@ -24,7 +24,7 @@ define void @t1(float* %R, <4 x float>*
define float @t2(<4 x float>* %P1) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
@@ -34,7 +34,7 @@ define float @t2(<4 x float>* %P1) nounw
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
%X = load <4 x float>, <4 x float>* %P1
@@ -44,7 +44,7 @@ define float @t2(<4 x float>* %P1) nounw
define void @t3(i32* %R, <4 x i32>* %P1) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl 12(%ecx), %ecx
@@ -52,7 +52,7 @@ define void @t3(i32* %R, <4 x i32>* %P1)
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl 12(%rsi), %eax
; X64-NEXT: movl %eax, (%rdi)
; X64-NEXT: retq
@@ -64,13 +64,13 @@ define void @t3(i32* %R, <4 x i32>* %P1)
define i32 @t4(<4 x i32>* %P1) nounwind {
; X32-LABEL: t4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 12(%eax), %eax
; X32-NEXT: retl
;
; X64-LABEL: t4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl 12(%rdi), %eax
; X64-NEXT: retq
%X = load <4 x i32>, <4 x i32>* %P1
Modified: llvm/trunk/test/CodeGen/X86/vec_extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_extract.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_extract.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define void @test1(<4 x float>* %F, float* %f) nounwind {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movaps (%ecx), %xmm0
@@ -13,7 +13,7 @@ define void @test1(<4 x float>* %F, floa
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: movss %xmm0, (%rsi)
@@ -28,7 +28,7 @@ entry:
define float @test2(<4 x float>* %F, float* %f) nounwind {
; X32-LABEL: test2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm0
@@ -40,7 +40,7 @@ define float @test2(<4 x float>* %F, flo
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: addps %xmm0, %xmm0
; X64-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -54,7 +54,7 @@ entry:
define void @test3(float* %R, <4 x float>* %P1) nounwind {
; X32-LABEL: test3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -62,7 +62,7 @@ define void @test3(float* %R, <4 x float
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movss %xmm0, (%rdi)
; X64-NEXT: retq
@@ -75,7 +75,7 @@ entry:
define double @test4(double %A) nounwind {
; X32-LABEL: test4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: subl $12, %esp
; X32-NEXT: calll foo
; X32-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -86,7 +86,7 @@ define double @test4(double %A) nounwind
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rax
; X64-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; X64-NEXT: callq foo
Modified: llvm/trunk/test/CodeGen/X86/vec_fabs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_fabs.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_fabs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_fabs.ll Mon Dec 4 09:18:51 2017
@@ -11,32 +11,32 @@
define <2 x double> @fabs_v2f64(<2 x double> %p) {
; X32_AVX-LABEL: fabs_v2f64:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v2f64:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v2f64:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v2f64:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v2f64:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v2f64:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <2 x double> @llvm.fabs.v2f64(<2 x double> %p)
@@ -46,32 +46,32 @@ declare <2 x double> @llvm.fabs.v2f64(<2
define <4 x float> @fabs_v4f32(<4 x float> %p) {
; X32_AVX-LABEL: fabs_v4f32:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v4f32:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v4f32:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v4f32:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v4f32:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v4f32:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to4}, %xmm0, %xmm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
@@ -81,32 +81,32 @@ declare <4 x float> @llvm.fabs.v4f32(<4
define <4 x double> @fabs_v4f64(<4 x double> %p) {
; X32_AVX-LABEL: fabs_v4f64:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v4f64:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpandq {{\.LCPI.*}}{1to4}, %ymm0, %ymm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v4f64:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandpd {{\.LCPI.*}}{1to4}, %ymm0, %ymm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v4f64:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v4f64:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpandq {{.*}}(%rip){1to4}, %ymm0, %ymm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v4f64:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <4 x double> @llvm.fabs.v4f64(<4 x double> %p)
@@ -116,32 +116,32 @@ declare <4 x double> @llvm.fabs.v4f64(<4
define <8 x float> @fabs_v8f32(<8 x float> %p) {
; X32_AVX-LABEL: fabs_v8f32:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v8f32:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v8f32:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v8f32:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v8f32:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpandd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v8f32:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
@@ -151,36 +151,36 @@ declare <8 x float> @llvm.fabs.v8f32(<8
define <8 x double> @fabs_v8f64(<8 x double> %p) {
; X32_AVX-LABEL: fabs_v8f64:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vmovaps {{.*#+}} ymm2 = [{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}}]
; X32_AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; X32_AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v8f64:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpandq {{\.LCPI.*}}{1to8}, %zmm0, %zmm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v8f64:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandpd {{\.LCPI.*}}{1to8}, %zmm0, %zmm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v8f64:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vmovaps {{.*#+}} ymm2 = [{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}}]
; X64_AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; X64_AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v8f64:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v8f64:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <8 x double> @llvm.fabs.v8f64(<8 x double> %p)
@@ -190,36 +190,36 @@ declare <8 x double> @llvm.fabs.v8f64(<8
define <16 x float> @fabs_v16f32(<16 x float> %p) {
; X32_AVX-LABEL: fabs_v16f32:
-; X32_AVX: # BB#0:
+; X32_AVX: # %bb.0:
; X32_AVX-NEXT: vmovaps {{.*#+}} ymm2 = [{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}}]
; X32_AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; X32_AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
; X32_AVX-NEXT: retl
;
; X32_AVX512VL-LABEL: fabs_v16f32:
-; X32_AVX512VL: # BB#0:
+; X32_AVX512VL: # %bb.0:
; X32_AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
; X32_AVX512VL-NEXT: retl
;
; X32_AVX512VLDQ-LABEL: fabs_v16f32:
-; X32_AVX512VLDQ: # BB#0:
+; X32_AVX512VLDQ: # %bb.0:
; X32_AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
; X32_AVX512VLDQ-NEXT: retl
;
; X64_AVX-LABEL: fabs_v16f32:
-; X64_AVX: # BB#0:
+; X64_AVX: # %bb.0:
; X64_AVX-NEXT: vmovaps {{.*#+}} ymm2 = [{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}},{{(nan|1\.#QNAN0e\+00)}}]
; X64_AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
; X64_AVX-NEXT: vandps %ymm2, %ymm1, %ymm1
; X64_AVX-NEXT: retq
;
; X64_AVX512VL-LABEL: fabs_v16f32:
-; X64_AVX512VL: # BB#0:
+; X64_AVX512VL: # %bb.0:
; X64_AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; X64_AVX512VL-NEXT: retq
;
; X64_AVX512VLDQ-LABEL: fabs_v16f32:
-; X64_AVX512VLDQ: # BB#0:
+; X64_AVX512VLDQ: # %bb.0:
; X64_AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; X64_AVX512VLDQ-NEXT: retq
%t = call <16 x float> @llvm.fabs.v16f32(<16 x float> %p)
@@ -244,13 +244,13 @@ declare <16 x float> @llvm.fabs.v16f32(<
define i64 @fabs_v2f32_1() {
; X32-LABEL: fabs_v2f32_1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X32-NEXT: retl
;
; X64-LABEL: fabs_v2f32_1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movabsq $9223372032559808512, %rax # imm = 0x7FFFFFFF00000000
; X64-NEXT: retq
%bitcast = bitcast i64 18446744069414584320 to <2 x float> ; 0xFFFF_FFFF_0000_0000
@@ -261,13 +261,13 @@ define i64 @fabs_v2f32_1() {
define i64 @fabs_v2f32_2() {
; X32-LABEL: fabs_v2f32_2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: retl
;
; X64-LABEL: fabs_v2f32_2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
; X64-NEXT: retq
%bitcast = bitcast i64 4294967295 to <2 x float> ; 0x0000_0000_FFFF_FFFF
Modified: llvm/trunk/test/CodeGen/X86/vec_floor.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_floor.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_floor.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_floor.ll Mon Dec 4 09:18:51 2017
@@ -5,17 +5,17 @@
define <2 x double> @floor_v2f64(<2 x double> %p) {
; SSE41-LABEL: floor_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $9, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $9, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $9, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
@@ -25,17 +25,17 @@ declare <2 x double> @llvm.floor.v2f64(<
define <4 x float> @floor_v4f32(<4 x float> %p) {
; SSE41-LABEL: floor_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $9, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $9, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $9, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
@@ -45,18 +45,18 @@ declare <4 x float> @llvm.floor.v4f32(<4
define <4 x double> @floor_v4f64(<4 x double> %p){
; SSE41-LABEL: floor_v4f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $9, %xmm0, %xmm0
; SSE41-NEXT: roundpd $9, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $9, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v4f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $9, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
@@ -66,18 +66,18 @@ declare <4 x double> @llvm.floor.v4f64(<
define <8 x float> @floor_v8f32(<8 x float> %p) {
; SSE41-LABEL: floor_v8f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $9, %xmm0, %xmm0
; SSE41-NEXT: roundps $9, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $9, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v8f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $9, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
@@ -87,7 +87,7 @@ declare <8 x float> @llvm.floor.v8f32(<8
define <8 x double> @floor_v8f64(<8 x double> %p){
; SSE41-LABEL: floor_v8f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $9, %xmm0, %xmm0
; SSE41-NEXT: roundpd $9, %xmm1, %xmm1
; SSE41-NEXT: roundpd $9, %xmm2, %xmm2
@@ -95,13 +95,13 @@ define <8 x double> @floor_v8f64(<8 x do
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $9, %ymm0, %ymm0
; AVX-NEXT: vroundpd $9, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v8f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $9, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <8 x double> @llvm.floor.v8f64(<8 x double> %p)
@@ -111,7 +111,7 @@ declare <8 x double> @llvm.floor.v8f64(<
define <16 x float> @floor_v16f32(<16 x float> %p) {
; SSE41-LABEL: floor_v16f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $9, %xmm0, %xmm0
; SSE41-NEXT: roundps $9, %xmm1, %xmm1
; SSE41-NEXT: roundps $9, %xmm2, %xmm2
@@ -119,13 +119,13 @@ define <16 x float> @floor_v16f32(<16 x
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v16f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $9, %ymm0, %ymm0
; AVX-NEXT: vroundps $9, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: floor_v16f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $9, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <16 x float> @llvm.floor.v16f32(<16 x float> %p)
@@ -135,17 +135,17 @@ declare <16 x float> @llvm.floor.v16f32(
define <2 x double> @ceil_v2f64(<2 x double> %p) {
; SSE41-LABEL: ceil_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $10, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $10, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $10, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
@@ -155,17 +155,17 @@ declare <2 x double> @llvm.ceil.v2f64(<2
define <4 x float> @ceil_v4f32(<4 x float> %p) {
; SSE41-LABEL: ceil_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $10, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $10, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $10, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
@@ -175,18 +175,18 @@ declare <4 x float> @llvm.ceil.v4f32(<4
define <4 x double> @ceil_v4f64(<4 x double> %p) {
; SSE41-LABEL: ceil_v4f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $10, %xmm0, %xmm0
; SSE41-NEXT: roundpd $10, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $10, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v4f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $10, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
@@ -196,18 +196,18 @@ declare <4 x double> @llvm.ceil.v4f64(<4
define <8 x float> @ceil_v8f32(<8 x float> %p) {
; SSE41-LABEL: ceil_v8f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $10, %xmm0, %xmm0
; SSE41-NEXT: roundps $10, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $10, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v8f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $10, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
@@ -217,7 +217,7 @@ declare <8 x float> @llvm.ceil.v8f32(<8
define <8 x double> @ceil_v8f64(<8 x double> %p){
; SSE41-LABEL: ceil_v8f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $10, %xmm0, %xmm0
; SSE41-NEXT: roundpd $10, %xmm1, %xmm1
; SSE41-NEXT: roundpd $10, %xmm2, %xmm2
@@ -225,13 +225,13 @@ define <8 x double> @ceil_v8f64(<8 x dou
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $10, %ymm0, %ymm0
; AVX-NEXT: vroundpd $10, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v8f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $10, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <8 x double> @llvm.ceil.v8f64(<8 x double> %p)
@@ -241,7 +241,7 @@ declare <8 x double> @llvm.ceil.v8f64(<8
define <16 x float> @ceil_v16f32(<16 x float> %p) {
; SSE41-LABEL: ceil_v16f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $10, %xmm0, %xmm0
; SSE41-NEXT: roundps $10, %xmm1, %xmm1
; SSE41-NEXT: roundps $10, %xmm2, %xmm2
@@ -249,13 +249,13 @@ define <16 x float> @ceil_v16f32(<16 x f
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v16f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $10, %ymm0, %ymm0
; AVX-NEXT: vroundps $10, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: ceil_v16f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $10, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <16 x float> @llvm.ceil.v16f32(<16 x float> %p)
@@ -265,17 +265,17 @@ declare <16 x float> @llvm.ceil.v16f32(<
define <2 x double> @trunc_v2f64(<2 x double> %p) {
; SSE41-LABEL: trunc_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $11, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
@@ -285,17 +285,17 @@ declare <2 x double> @llvm.trunc.v2f64(<
define <4 x float> @trunc_v4f32(<4 x float> %p) {
; SSE41-LABEL: trunc_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $11, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $11, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
@@ -305,18 +305,18 @@ declare <4 x float> @llvm.trunc.v4f32(<4
define <4 x double> @trunc_v4f64(<4 x double> %p) {
; SSE41-LABEL: trunc_v4f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v4f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $11, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
@@ -326,18 +326,18 @@ declare <4 x double> @llvm.trunc.v4f64(<
define <8 x float> @trunc_v8f32(<8 x float> %p) {
; SSE41-LABEL: trunc_v8f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: roundps $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $11, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v8f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $11, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
@@ -347,7 +347,7 @@ declare <8 x float> @llvm.trunc.v8f32(<8
define <8 x double> @trunc_v8f64(<8 x double> %p){
; SSE41-LABEL: trunc_v8f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: roundpd $11, %xmm2, %xmm2
@@ -355,13 +355,13 @@ define <8 x double> @trunc_v8f64(<8 x do
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
; AVX-NEXT: vroundpd $11, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v8f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $11, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <8 x double> @llvm.trunc.v8f64(<8 x double> %p)
@@ -371,7 +371,7 @@ declare <8 x double> @llvm.trunc.v8f64(<
define <16 x float> @trunc_v16f32(<16 x float> %p) {
; SSE41-LABEL: trunc_v16f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: roundps $11, %xmm1, %xmm1
; SSE41-NEXT: roundps $11, %xmm2, %xmm2
@@ -379,13 +379,13 @@ define <16 x float> @trunc_v16f32(<16 x
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v16f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $11, %ymm0, %ymm0
; AVX-NEXT: vroundps $11, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc_v16f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $11, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <16 x float> @llvm.trunc.v16f32(<16 x float> %p)
@@ -395,17 +395,17 @@ declare <16 x float> @llvm.trunc.v16f32(
define <2 x double> @rint_v2f64(<2 x double> %p) {
; SSE41-LABEL: rint_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $4, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $4, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $4, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p)
@@ -415,17 +415,17 @@ declare <2 x double> @llvm.rint.v2f64(<2
define <4 x float> @rint_v4f32(<4 x float> %p) {
; SSE41-LABEL: rint_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $4, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $4, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $4, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p)
@@ -435,18 +435,18 @@ declare <4 x float> @llvm.rint.v4f32(<4
define <4 x double> @rint_v4f64(<4 x double> %p) {
; SSE41-LABEL: rint_v4f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $4, %xmm0, %xmm0
; SSE41-NEXT: roundpd $4, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $4, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v4f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $4, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p)
@@ -456,18 +456,18 @@ declare <4 x double> @llvm.rint.v4f64(<4
define <8 x float> @rint_v8f32(<8 x float> %p) {
; SSE41-LABEL: rint_v8f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $4, %xmm0, %xmm0
; SSE41-NEXT: roundps $4, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $4, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v8f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $4, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p)
@@ -477,7 +477,7 @@ declare <8 x float> @llvm.rint.v8f32(<8
define <8 x double> @rint_v8f64(<8 x double> %p){
; SSE41-LABEL: rint_v8f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $4, %xmm0, %xmm0
; SSE41-NEXT: roundpd $4, %xmm1, %xmm1
; SSE41-NEXT: roundpd $4, %xmm2, %xmm2
@@ -485,13 +485,13 @@ define <8 x double> @rint_v8f64(<8 x dou
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $4, %ymm0, %ymm0
; AVX-NEXT: vroundpd $4, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v8f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $4, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <8 x double> @llvm.rint.v8f64(<8 x double> %p)
@@ -501,7 +501,7 @@ declare <8 x double> @llvm.rint.v8f64(<8
define <16 x float> @rint_v16f32(<16 x float> %p) {
; SSE41-LABEL: rint_v16f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $4, %xmm0, %xmm0
; SSE41-NEXT: roundps $4, %xmm1, %xmm1
; SSE41-NEXT: roundps $4, %xmm2, %xmm2
@@ -509,13 +509,13 @@ define <16 x float> @rint_v16f32(<16 x f
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v16f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $4, %ymm0, %ymm0
; AVX-NEXT: vroundps $4, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: rint_v16f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $4, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <16 x float> @llvm.rint.v16f32(<16 x float> %p)
@@ -525,17 +525,17 @@ declare <16 x float> @llvm.rint.v16f32(<
define <2 x double> @nearbyint_v2f64(<2 x double> %p) {
; SSE41-LABEL: nearbyint_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $12, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $12, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $12, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
@@ -545,17 +545,17 @@ declare <2 x double> @llvm.nearbyint.v2f
define <4 x float> @nearbyint_v4f32(<4 x float> %p) {
; SSE41-LABEL: nearbyint_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $12, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $12, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $12, %xmm0, %xmm0
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
@@ -565,18 +565,18 @@ declare <4 x float> @llvm.nearbyint.v4f3
define <4 x double> @nearbyint_v4f64(<4 x double> %p) {
; SSE41-LABEL: nearbyint_v4f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $12, %xmm0, %xmm0
; SSE41-NEXT: roundpd $12, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v4f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $12, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v4f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $12, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
@@ -586,18 +586,18 @@ declare <4 x double> @llvm.nearbyint.v4f
define <8 x float> @nearbyint_v8f32(<8 x float> %p) {
; SSE41-LABEL: nearbyint_v8f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $12, %xmm0, %xmm0
; SSE41-NEXT: roundps $12, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v8f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $12, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v8f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $12, %ymm0, %ymm0
; AVX512-NEXT: retq
%t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
@@ -607,7 +607,7 @@ declare <8 x float> @llvm.nearbyint.v8f3
define <8 x double> @nearbyint_v8f64(<8 x double> %p){
; SSE41-LABEL: nearbyint_v8f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $12, %xmm0, %xmm0
; SSE41-NEXT: roundpd $12, %xmm1, %xmm1
; SSE41-NEXT: roundpd $12, %xmm2, %xmm2
@@ -615,13 +615,13 @@ define <8 x double> @nearbyint_v8f64(<8
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v8f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $12, %ymm0, %ymm0
; AVX-NEXT: vroundpd $12, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v8f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $12, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <8 x double> @llvm.nearbyint.v8f64(<8 x double> %p)
@@ -631,7 +631,7 @@ declare <8 x double> @llvm.nearbyint.v8f
define <16 x float> @nearbyint_v16f32(<16 x float> %p) {
; SSE41-LABEL: nearbyint_v16f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $12, %xmm0, %xmm0
; SSE41-NEXT: roundps $12, %xmm1, %xmm1
; SSE41-NEXT: roundps $12, %xmm2, %xmm2
@@ -639,13 +639,13 @@ define <16 x float> @nearbyint_v16f32(<1
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v16f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vroundps $12, %ymm0, %ymm0
; AVX-NEXT: vroundps $12, %ymm1, %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: nearbyint_v16f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $12, %zmm0, %zmm0
; AVX512-NEXT: retq
%t = call <16 x float> @llvm.nearbyint.v16f32(<16 x float> %p)
@@ -659,17 +659,17 @@ declare <16 x float> @llvm.nearbyint.v16
define <2 x double> @const_floor_v2f64() {
; SSE41-LABEL: const_floor_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_floor_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_floor_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.floor.v2f64(<2 x double> <double -1.5, double 2.5>)
@@ -678,17 +678,17 @@ define <2 x double> @const_floor_v2f64()
define <4 x float> @const_floor_v4f32() {
; SSE41-LABEL: const_floor_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-4.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_floor_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-4.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_floor_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-4.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.floor.v4f32(<4 x float> <float -3.5, float 6.0, float -9.0, float 2.5>)
@@ -697,17 +697,17 @@ define <4 x float> @const_floor_v4f32()
define <2 x double> @const_ceil_v2f64() {
; SSE41-LABEL: const_ceil_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-1.000000e+00,3.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_ceil_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-1.000000e+00,3.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_ceil_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-1.000000e+00,3.000000e+00]
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> <double -1.5, double 2.5>)
@@ -716,17 +716,17 @@ define <2 x double> @const_ceil_v2f64()
define <4 x float> @const_ceil_v4f32() {
; SSE41-LABEL: const_ceil_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,3.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_ceil_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,3.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_ceil_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,3.000000e+00]
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> <float -3.5, float 6.0, float -9.0, float 2.5>)
@@ -735,17 +735,17 @@ define <4 x float> @const_ceil_v4f32() {
define <2 x double> @const_trunc_v2f64() {
; SSE41-LABEL: const_trunc_v2f64:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-1.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_trunc_v2f64:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-1.000000e+00,2.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_trunc_v2f64:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-1.000000e+00,2.000000e+00]
; AVX512-NEXT: retq
%t = call <2 x double> @llvm.trunc.v2f64(<2 x double> <double -1.5, double 2.5>)
@@ -754,17 +754,17 @@ define <2 x double> @const_trunc_v2f64()
define <4 x float> @const_trunc_v4f32() {
; SSE41-LABEL: const_trunc_v4f32:
-; SSE41: ## BB#0:
+; SSE41: ## %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_trunc_v4f32:
-; AVX: ## BB#0:
+; AVX: ## %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; AVX-NEXT: retq
;
; AVX512-LABEL: const_trunc_v4f32:
-; AVX512: ## BB#0:
+; AVX512: ## %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; AVX512-NEXT: retq
%t = call <4 x float> @llvm.trunc.v4f32(<4 x float> <float -3.5, float 6.0, float -9.0, float 2.5>)
Modified: llvm/trunk/test/CodeGen/X86/vec_fneg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_fneg.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_fneg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_fneg.ll Mon Dec 4 09:18:51 2017
@@ -9,12 +9,12 @@
; This test verifies that we use an xor with a constant to flip the sign bits; no subtraction needed.
define <4 x float> @t1(<4 x float> %Q) nounwind {
; X32-SSE-LABEL: t1:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: t1:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: retq
%tmp = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %Q
@@ -24,14 +24,14 @@ define <4 x float> @t1(<4 x float> %Q) n
; This test verifies that we generate an FP subtraction because "0.0 - x" is not an fneg.
define <4 x float> @t2(<4 x float> %Q) nounwind {
; X32-SSE-LABEL: t2:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: xorps %xmm1, %xmm1
; X32-SSE-NEXT: subps %xmm0, %xmm1
; X32-SSE-NEXT: movaps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: t2:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorps %xmm1, %xmm1
; X64-SSE-NEXT: subps %xmm0, %xmm1
; X64-SSE-NEXT: movaps %xmm1, %xmm0
@@ -53,7 +53,7 @@ define <4 x float> @t2(<4 x float> %Q) n
define <2 x float> @fneg_bitcast(i64 %i) nounwind {
; X32-SSE1-LABEL: fneg_bitcast:
-; X32-SSE1: # BB#0:
+; X32-SSE1: # %bb.0:
; X32-SSE1-NEXT: pushl %ebp
; X32-SSE1-NEXT: movl %esp, %ebp
; X32-SSE1-NEXT: andl $-16, %esp
@@ -70,7 +70,7 @@ define <2 x float> @fneg_bitcast(i64 %i)
; X32-SSE1-NEXT: retl
;
; X32-SSE2-LABEL: fneg_bitcast:
-; X32-SSE2: # BB#0:
+; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT: xorl %eax, %ecx
@@ -81,7 +81,7 @@ define <2 x float> @fneg_bitcast(i64 %i)
; X32-SSE2-NEXT: retl
;
; X64-SSE1-LABEL: fneg_bitcast:
-; X64-SSE1: # BB#0:
+; X64-SSE1: # %bb.0:
; X64-SSE1-NEXT: movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE1-NEXT: xorq %rdi, %rax
; X64-SSE1-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
@@ -89,7 +89,7 @@ define <2 x float> @fneg_bitcast(i64 %i)
; X64-SSE1-NEXT: retq
;
; X64-SSE2-LABEL: fneg_bitcast:
-; X64-SSE2: # BB#0:
+; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE2-NEXT: xorq %rdi, %rax
; X64-SSE2-NEXT: movq %rax, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll Mon Dec 4 09:18:51 2017
@@ -18,7 +18,7 @@
define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
; SSE-LABEL: fptosi_2f64_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -29,7 +29,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2
; SSE-NEXT: retq
;
; VEX-LABEL: fptosi_2f64_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vcvttsd2si %xmm0, %rax
; VEX-NEXT: vmovq %rax, %xmm1
; VEX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -39,7 +39,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptosi_2f64_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttsd2si %xmm0, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -49,7 +49,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_2f64_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttsd2si %xmm0, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -59,7 +59,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_2f64_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -67,7 +67,7 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_2f64_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2qq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <2 x double> %a to <2 x i64>
@@ -76,12 +76,12 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2
define <4 x i32> @fptosi_2f64_to_4i32(<2 x double> %a) {
; SSE-LABEL: fptosi_2f64_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f64_to_4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = fptosi <2 x double> %a to <2 x i32>
@@ -91,13 +91,13 @@ define <4 x i32> @fptosi_2f64_to_4i32(<2
define <2 x i32> @fptosi_2f64_to_2i32(<2 x double> %a) {
; SSE-LABEL: fptosi_2f64_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f64_to_2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
@@ -107,14 +107,14 @@ define <2 x i32> @fptosi_2f64_to_2i32(<2
define <4 x i32> @fptosi_4f64_to_2i32(<2 x double> %a) {
; SSE-LABEL: fptosi_4f64_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm1
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f64_to_2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
; AVX-NEXT: vzeroupper
@@ -126,7 +126,7 @@ define <4 x i32> @fptosi_4f64_to_2i32(<2
define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
; SSE-LABEL: fptosi_4f64_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -144,7 +144,7 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4
; SSE-NEXT: retq
;
; AVX1-LABEL: fptosi_4f64_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vcvttsd2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm2
@@ -162,7 +162,7 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptosi_4f64_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vcvttsd2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
@@ -180,7 +180,7 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptosi_4f64_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vcvttsd2si %xmm1, %rax
; AVX512F-NEXT: vmovq %rax, %xmm2
@@ -198,7 +198,7 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_4f64_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vcvttsd2si %xmm1, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm2
@@ -216,14 +216,14 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_4f64_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f64_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2qq %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <4 x double> %a to <4 x i64>
@@ -232,14 +232,14 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4
define <4 x i32> @fptosi_4f64_to_4i32(<4 x double> %a) {
; SSE-LABEL: fptosi_4f64_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm1, %xmm1
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f64_to_4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -253,7 +253,7 @@ define <4 x i32> @fptosi_4f64_to_4i32(<4
define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
; SSE-LABEL: fptoui_2f64_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: subsd %xmm2, %xmm1
@@ -278,7 +278,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f64_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; VEX-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttsd2si %xmm2, %rax
@@ -300,7 +300,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f64_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttsd2usi %xmm0, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -310,7 +310,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f64_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttsd2usi %xmm0, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -320,7 +320,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_2f64_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -328,7 +328,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f64_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2uqq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <2 x double> %a to <2 x i64>
@@ -337,7 +337,7 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2
define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
; SSE-LABEL: fptoui_2f64_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: subsd %xmm2, %xmm1
@@ -364,7 +364,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f64_to_4i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; VEX-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttsd2si %xmm2, %rax
@@ -387,7 +387,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f64_to_4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
@@ -395,12 +395,12 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f64_to_4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_2f64_to_4i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
@@ -408,7 +408,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f64_to_4i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <2 x double> %a to <2 x i32>
@@ -418,7 +418,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2
define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
; SSE-LABEL: fptoui_2f64_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: subsd %xmm1, %xmm2
@@ -443,7 +443,7 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f64_to_2i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; VEX-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttsd2si %xmm2, %rax
@@ -466,7 +466,7 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f64_to_2i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -474,12 +474,12 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f64_to_2i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_2f64_to_2i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -487,7 +487,7 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f64_to_2i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <2 x double> %a to <2 x i32>
@@ -497,7 +497,7 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2
define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: subsd %xmm2, %xmm1
@@ -529,7 +529,7 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_4f64_to_2i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; VEX-NEXT: vcvttsd2si %xmm1, %rax
; VEX-NEXT: vcvttsd2si %xmm0, %rcx
@@ -541,7 +541,7 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f64_to_2i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -549,14 +549,14 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_2i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VL-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_2i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -564,7 +564,7 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_2i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VLDQ-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
@@ -576,7 +576,7 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2
define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
; SSE-NEXT: subsd %xmm3, %xmm0
@@ -619,7 +619,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4
; SSE-NEXT: retq
;
; AVX1-LABEL: fptoui_4f64_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vsubsd %xmm1, %xmm2, %xmm3
@@ -659,7 +659,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptoui_4f64_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vsubsd %xmm1, %xmm2, %xmm3
@@ -699,7 +699,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f64_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vcvttsd2usi %xmm1, %rax
; AVX512F-NEXT: vmovq %rax, %xmm2
@@ -717,7 +717,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vcvttsd2usi %xmm1, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm2
@@ -735,14 +735,14 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2uqq %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <4 x double> %a to <4 x i64>
@@ -751,7 +751,7 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4
define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: movapd %xmm1, %xmm3
; SSE-NEXT: subsd %xmm2, %xmm3
@@ -795,7 +795,7 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_4f64_to_4i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; VEX-NEXT: vcvttsd2si %xmm1, %rax
; VEX-NEXT: vcvttsd2si %xmm0, %rcx
@@ -811,7 +811,7 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f64_to_4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -819,13 +819,13 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -833,7 +833,7 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_4i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -847,13 +847,13 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4
define <2 x i32> @fptosi_2f32_to_2i32(<2 x float> %a) {
; SSE-LABEL: fptosi_2f32_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f32_to_2i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
@@ -863,12 +863,12 @@ define <2 x i32> @fptosi_2f32_to_2i32(<2
define <4 x i32> @fptosi_4f32_to_4i32(<4 x float> %a) {
; SSE-LABEL: fptosi_4f32_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f32_to_4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = fptosi <4 x float> %a to <4 x i32>
@@ -877,7 +877,7 @@ define <4 x i32> @fptosi_4f32_to_4i32(<4
define <2 x i64> @fptosi_2f32_to_2i64(<4 x float> %a) {
; SSE-LABEL: fptosi_2f32_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -888,7 +888,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4
; SSE-NEXT: retq
;
; VEX-LABEL: fptosi_2f32_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vcvttss2si %xmm0, %rax
; VEX-NEXT: vmovq %rax, %xmm1
; VEX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -898,7 +898,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptosi_2f32_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttss2si %xmm0, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -908,7 +908,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_2f32_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttss2si %xmm0, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -918,7 +918,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_2f32_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttss2si %xmm0, %rax
; AVX512DQ-NEXT: vmovq %rax, %xmm1
; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -928,7 +928,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_2f32_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%shuf = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
@@ -938,7 +938,7 @@ define <2 x i64> @fptosi_2f32_to_2i64(<4
define <2 x i64> @fptosi_4f32_to_2i64(<4 x float> %a) {
; SSE-LABEL: fptosi_4f32_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
@@ -949,7 +949,7 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4
; SSE-NEXT: retq
;
; VEX-LABEL: fptosi_4f32_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; VEX-NEXT: vcvttss2si %xmm1, %rax
; VEX-NEXT: vcvttss2si %xmm0, %rcx
@@ -959,7 +959,7 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptosi_4f32_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvttss2si %xmm1, %rax
; AVX512F-NEXT: vcvttss2si %xmm0, %rcx
@@ -969,7 +969,7 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_4f32_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvttss2si %xmm1, %rax
; AVX512VL-NEXT: vcvttss2si %xmm0, %rcx
@@ -979,7 +979,7 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_4f32_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -987,7 +987,7 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VLDQ-NEXT: vzeroupper
@@ -999,13 +999,13 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4
define <8 x i32> @fptosi_8f32_to_8i32(<8 x float> %a) {
; SSE-LABEL: fptosi_8f32_to_8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_8f32_to_8i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX-NEXT: retq
%cvt = fptosi <8 x float> %a to <8 x i32>
@@ -1014,7 +1014,7 @@ define <8 x i32> @fptosi_8f32_to_8i32(<8
define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; SSE-LABEL: fptosi_4f32_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
@@ -1034,7 +1034,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8
; SSE-NEXT: retq
;
; AVX1-LABEL: fptosi_4f32_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX1-NEXT: vcvttss2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm1
@@ -1052,7 +1052,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptosi_4f32_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
@@ -1070,7 +1070,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptosi_4f32_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512F-NEXT: vcvttss2si %xmm1, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
@@ -1088,7 +1088,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_4f32_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512VL-NEXT: vcvttss2si %xmm1, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
@@ -1106,13 +1106,13 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_4f32_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512VLDQ-NEXT: retq
%shuf = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1122,7 +1122,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8
define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; SSE-LABEL: fptosi_8f32_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
@@ -1142,7 +1142,7 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8
; SSE-NEXT: retq
;
; AVX1-LABEL: fptosi_8f32_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX1-NEXT: vcvttss2si %xmm1, %rax
; AVX1-NEXT: vmovq %rax, %xmm1
@@ -1160,7 +1160,7 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptosi_8f32_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
@@ -1178,7 +1178,7 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptosi_8f32_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvttss2si %xmm1, %rax
; AVX512F-NEXT: vcvttss2si %xmm0, %rcx
@@ -1196,7 +1196,7 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptosi_8f32_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvttss2si %xmm1, %rax
; AVX512VL-NEXT: vcvttss2si %xmm0, %rcx
@@ -1214,13 +1214,13 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_8f32_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_8f32_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512VLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VLDQ-NEXT: retq
@@ -1235,7 +1235,7 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8
define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
; SSE-LABEL: fptoui_2f32_to_2i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
@@ -1260,7 +1260,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f32_to_2i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; VEX-NEXT: vsubss %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttss2si %xmm2, %rax
@@ -1282,7 +1282,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f32_to_2i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -1290,13 +1290,13 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f32_to_2i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_2f32_to_2i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
@@ -1304,7 +1304,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f32_to_2i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512VLDQ-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512VLDQ-NEXT: retq
@@ -1314,7 +1314,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2
define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
@@ -1335,7 +1335,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_4f32_to_4i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; VEX-NEXT: vcvttss2si %xmm1, %rax
; VEX-NEXT: vcvttss2si %xmm0, %rcx
@@ -1350,7 +1350,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f32_to_4i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1358,12 +1358,12 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f32_to_4i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f32_to_4i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1371,7 +1371,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_4i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <4 x float> %a to <4 x i32>
@@ -1380,7 +1380,7 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4
define <2 x i64> @fptoui_2f32_to_2i64(<4 x float> %a) {
; SSE-LABEL: fptoui_2f32_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
@@ -1405,7 +1405,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_2f32_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; VEX-NEXT: vsubss %xmm1, %xmm0, %xmm2
; VEX-NEXT: vcvttss2si %xmm2, %rax
@@ -1427,7 +1427,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f32_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttss2usi %xmm0, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -1437,7 +1437,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_2f32_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttss2usi %xmm0, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -1447,7 +1447,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_2f32_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttss2usi %xmm0, %rax
; AVX512DQ-NEXT: vmovq %rax, %xmm1
; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -1457,7 +1457,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_2f32_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%shuf = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
@@ -1467,7 +1467,7 @@ define <2 x i64> @fptoui_2f32_to_2i64(<4
define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
@@ -1492,7 +1492,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4
; SSE-NEXT: retq
;
; VEX-LABEL: fptoui_4f32_to_2i64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; VEX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; VEX-NEXT: vsubss %xmm2, %xmm1, %xmm3
@@ -1514,7 +1514,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4
; VEX-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f32_to_2i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvttss2usi %xmm1, %rax
; AVX512F-NEXT: vcvttss2usi %xmm0, %rcx
@@ -1524,7 +1524,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f32_to_2i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvttss2usi %xmm1, %rax
; AVX512VL-NEXT: vcvttss2usi %xmm0, %rcx
@@ -1534,7 +1534,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f32_to_2i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1542,7 +1542,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_2i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %ymm0
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VLDQ-NEXT: vzeroupper
@@ -1554,7 +1554,7 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4
define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
; SSE-LABEL: fptoui_8f32_to_8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: cvttss2si %xmm0, %rax
@@ -1591,7 +1591,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8
; SSE-NEXT: retq
;
; AVX1-LABEL: fptoui_8f32_to_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vcvttss2si %xmm2, %rax
@@ -1619,7 +1619,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptoui_8f32_to_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vcvttss2si %xmm2, %rax
@@ -1647,26 +1647,26 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptoui_8f32_to_8i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_8f32_to_8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvttps2udq %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_8f32_to_8i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_8f32_to_8i32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2udq %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <8 x float> %a to <8 x i32>
@@ -1675,7 +1675,7 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8
define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm1, %xmm2
@@ -1721,7 +1721,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8
; SSE-NEXT: retq
;
; AVX1-LABEL: fptoui_4f32_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -1761,7 +1761,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptoui_4f32_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -1801,7 +1801,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f32_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512F-NEXT: vcvttss2usi %xmm1, %rax
; AVX512F-NEXT: vmovq %rax, %xmm1
@@ -1819,7 +1819,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f32_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512VL-NEXT: vcvttss2usi %xmm1, %rax
; AVX512VL-NEXT: vmovq %rax, %xmm1
@@ -1837,13 +1837,13 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f32_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %ymm0
; AVX512VLDQ-NEXT: retq
%shuf = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1853,7 +1853,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8
define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; SSE-LABEL: fptoui_8f32_to_4i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: subss %xmm1, %xmm2
@@ -1899,7 +1899,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8
; SSE-NEXT: retq
;
; AVX1-LABEL: fptoui_8f32_to_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -1939,7 +1939,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptoui_8f32_to_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
@@ -1979,7 +1979,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8
; AVX2-NEXT: retq
;
; AVX512F-LABEL: fptoui_8f32_to_4i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvttss2usi %xmm1, %rax
; AVX512F-NEXT: vcvttss2usi %xmm0, %rcx
@@ -1997,7 +1997,7 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_8f32_to_4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvttss2usi %xmm1, %rax
; AVX512VL-NEXT: vcvttss2usi %xmm0, %rcx
@@ -2015,13 +2015,13 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_8f32_to_4i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_8f32_to_4i64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512VLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VLDQ-NEXT: retq
@@ -2036,12 +2036,12 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8
define <2 x i64> @fptosi_2f64_to_2i64_const() {
; SSE-LABEL: fptosi_2f64_to_2i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f64_to_2i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,18446744073709551615]
; AVX-NEXT: retq
%cvt = fptosi <2 x double> <double 1.0, double -1.0> to <2 x i64>
@@ -2050,12 +2050,12 @@ define <2 x i64> @fptosi_2f64_to_2i64_co
define <4 x i32> @fptosi_2f64_to_2i32_const() {
; SSE-LABEL: fptosi_2f64_to_2i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = <4294967295,1,u,u>
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f64_to_2i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <4294967295,1,u,u>
; AVX-NEXT: retq
%cvt = fptosi <2 x double> <double -1.0, double 1.0> to <2 x i32>
@@ -2065,13 +2065,13 @@ define <4 x i32> @fptosi_2f64_to_2i32_co
define <4 x i64> @fptosi_4f64_to_4i64_const() {
; SSE-LABEL: fptosi_4f64_to_4i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [2,18446744073709551613]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f64_to_4i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,18446744073709551615,2,18446744073709551613]
; AVX-NEXT: retq
%cvt = fptosi <4 x double> <double 1.0, double -1.0, double 2.0, double -3.0> to <4 x i64>
@@ -2080,12 +2080,12 @@ define <4 x i64> @fptosi_4f64_to_4i64_co
define <4 x i32> @fptosi_4f64_to_4i32_const() {
; SSE-LABEL: fptosi_4f64_to_4i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,1,4294967294,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f64_to_4i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,1,4294967294,3]
; AVX-NEXT: retq
%cvt = fptosi <4 x double> <double -1.0, double 1.0, double -2.0, double 3.0> to <4 x i32>
@@ -2094,12 +2094,12 @@ define <4 x i32> @fptosi_4f64_to_4i32_co
define <2 x i64> @fptoui_2f64_to_2i64_const() {
; SSE-LABEL: fptoui_2f64_to_2i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [2,4]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_2f64_to_2i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [2,4]
; AVX-NEXT: retq
%cvt = fptoui <2 x double> <double 2.0, double 4.0> to <2 x i64>
@@ -2108,12 +2108,12 @@ define <2 x i64> @fptoui_2f64_to_2i64_co
define <4 x i32> @fptoui_2f64_to_2i32_const(<2 x double> %a) {
; SSE-LABEL: fptoui_2f64_to_2i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = <2,4,u,u>
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_2f64_to_2i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <2,4,u,u>
; AVX-NEXT: retq
%cvt = fptoui <2 x double> <double 2.0, double 4.0> to <2 x i32>
@@ -2123,13 +2123,13 @@ define <4 x i32> @fptoui_2f64_to_2i32_co
define <4 x i64> @fptoui_4f64_to_4i64_const(<4 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_4i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [2,4]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [6,8]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_4f64_to_4i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [2,4,6,8]
; AVX-NEXT: retq
%cvt = fptoui <4 x double> <double 2.0, double 4.0, double 6.0, double 8.0> to <4 x i64>
@@ -2138,12 +2138,12 @@ define <4 x i64> @fptoui_4f64_to_4i64_co
define <4 x i32> @fptoui_4f64_to_4i32_const(<4 x double> %a) {
; SSE-LABEL: fptoui_4f64_to_4i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [2,4,6,8]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_4f64_to_4i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [2,4,6,8]
; AVX-NEXT: retq
%cvt = fptoui <4 x double> <double 2.0, double 4.0, double 6.0, double 8.0> to <4 x i32>
@@ -2152,12 +2152,12 @@ define <4 x i32> @fptoui_4f64_to_4i32_co
define <4 x i32> @fptosi_4f32_to_4i32_const() {
; SSE-LABEL: fptosi_4f32_to_4i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,4294967295,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f32_to_4i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,4294967295,2,3]
; AVX-NEXT: retq
%cvt = fptosi <4 x float> <float 1.0, float -1.0, float 2.0, float 3.0> to <4 x i32>
@@ -2166,13 +2166,13 @@ define <4 x i32> @fptosi_4f32_to_4i32_co
define <4 x i64> @fptosi_4f32_to_4i64_const() {
; SSE-LABEL: fptosi_4f32_to_4i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_4f32_to_4i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,18446744073709551615,2,3]
; AVX-NEXT: retq
%cvt = fptosi <4 x float> <float 1.0, float -1.0, float 2.0, float 3.0> to <4 x i64>
@@ -2181,13 +2181,13 @@ define <4 x i64> @fptosi_4f32_to_4i64_co
define <8 x i32> @fptosi_8f32_to_8i32_const(<8 x float> %a) {
; SSE-LABEL: fptosi_8f32_to_8i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,4294967295,2,3]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [6,4294967288,2,4294967295]
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_8f32_to_8i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,4294967295,2,3,6,4294967288,2,4294967295]
; AVX-NEXT: retq
%cvt = fptosi <8 x float> <float 1.0, float -1.0, float 2.0, float 3.0, float 6.0, float -8.0, float 2.0, float -1.0> to <8 x i32>
@@ -2196,12 +2196,12 @@ define <8 x i32> @fptosi_8f32_to_8i32_co
define <4 x i32> @fptoui_4f32_to_4i32_const(<4 x float> %a) {
; SSE-LABEL: fptoui_4f32_to_4i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,2,4,6]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_4f32_to_4i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,2,4,6]
; AVX-NEXT: retq
%cvt = fptoui <4 x float> <float 1.0, float 2.0, float 4.0, float 6.0> to <4 x i32>
@@ -2210,13 +2210,13 @@ define <4 x i32> @fptoui_4f32_to_4i32_co
define <4 x i64> @fptoui_4f32_to_4i64_const() {
; SSE-LABEL: fptoui_4f32_to_4i64_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,2]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [4,8]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_4f32_to_4i64_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,4,8]
; AVX-NEXT: retq
%cvt = fptoui <4 x float> <float 1.0, float 2.0, float 4.0, float 8.0> to <4 x i64>
@@ -2225,13 +2225,13 @@ define <4 x i64> @fptoui_4f32_to_4i64_co
define <8 x i32> @fptoui_8f32_to_8i32_const(<8 x float> %a) {
; SSE-LABEL: fptoui_8f32_to_8i32_const:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,2,4,6]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [8,6,4,1]
; SSE-NEXT: retq
;
; AVX-LABEL: fptoui_8f32_to_8i32_const:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,4,6,8,6,4,1]
; AVX-NEXT: retq
%cvt = fptoui <8 x float> <float 1.0, float 2.0, float 4.0, float 6.0, float 8.0, float 6.0, float 4.0, float 1.0> to <8 x i32>
@@ -2244,7 +2244,7 @@ define <8 x i32> @fptoui_8f32_to_8i32_co
define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
; SSE-LABEL: fptosi_2f16_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pushq %rax
; SSE-NEXT: movss %xmm1, {{[0-9]+}}(%rsp) # 4-byte Spill
; SSE-NEXT: callq __gnu_f2h_ieee
@@ -2267,7 +2267,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2
; SSE-NEXT: retq
;
; VEX-LABEL: fptosi_2f16_to_4i32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: pushq %rax
; VEX-NEXT: vmovss %xmm1, {{[0-9]+}}(%rsp) # 4-byte Spill
; VEX-NEXT: callq __gnu_f2h_ieee
@@ -2289,7 +2289,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2
; VEX-NEXT: retq
;
; AVX512-LABEL: fptosi_2f16_to_4i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
@@ -2308,7 +2308,7 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2
define <4 x i32> @fptosi_2f80_to_4i32(<2 x x86_fp80> %a) nounwind {
; SSE-LABEL: fptosi_2f80_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: fldt {{[0-9]+}}(%rsp)
; SSE-NEXT: fldt {{[0-9]+}}(%rsp)
; SSE-NEXT: fnstcw -{{[0-9]+}}(%rsp)
@@ -2333,7 +2333,7 @@ define <4 x i32> @fptosi_2f80_to_4i32(<2
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f80_to_4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: fldt {{[0-9]+}}(%rsp)
; AVX-NEXT: fldt {{[0-9]+}}(%rsp)
; AVX-NEXT: fisttpll -{{[0-9]+}}(%rsp)
@@ -2350,7 +2350,7 @@ define <4 x i32> @fptosi_2f80_to_4i32(<2
define <4 x i32> @fptosi_2f128_to_4i32(<2 x fp128> %a) nounwind {
; SSE-LABEL: fptosi_2f128_to_4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
@@ -2375,7 +2375,7 @@ define <4 x i32> @fptosi_2f128_to_4i32(<
; SSE-NEXT: retq
;
; AVX-LABEL: fptosi_2f128_to_4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
Modified: llvm/trunk/test/CodeGen/X86/vec_fpext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_fpext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_fpext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_fpext.ll Mon Dec 4 09:18:51 2017
@@ -9,7 +9,7 @@
; PR11674
define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) {
; X32-SSE-LABEL: fpext_frommem:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-SSE-NEXT: cvtps2pd (%ecx), %xmm0 # encoding: [0x0f,0x5a,0x01]
@@ -17,7 +17,7 @@ define void @fpext_frommem(<2 x float>*
; X32-SSE-NEXT: retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_frommem:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX-NEXT: vcvtps2pd (%ecx), %xmm0 # encoding: [0xc5,0xf8,0x5a,0x01]
@@ -25,7 +25,7 @@ define void @fpext_frommem(<2 x float>*
; X32-AVX-NEXT: retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_frommem:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2pd (%ecx), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0x01]
@@ -33,19 +33,19 @@ define void @fpext_frommem(<2 x float>*
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtps2pd (%rdi), %xmm0 # encoding: [0x0f,0x5a,0x07]
; X64-SSE-NEXT: movups %xmm0, (%rsi) # encoding: [0x0f,0x11,0x06]
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_frommem:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtps2pd (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x5a,0x07]
; X64-AVX-NEXT: vmovups %xmm0, (%rsi) # encoding: [0xc5,0xf8,0x11,0x06]
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_frommem:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vcvtps2pd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0x07]
; X64-AVX512VL-NEXT: vmovups %xmm0, (%rsi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x06]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
@@ -58,7 +58,7 @@ entry:
define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
; X32-SSE-LABEL: fpext_frommem4:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-SSE-NEXT: cvtps2pd (%ecx), %xmm0 # encoding: [0x0f,0x5a,0x01]
@@ -68,7 +68,7 @@ define void @fpext_frommem4(<4 x float>*
; X32-SSE-NEXT: retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_frommem4:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX-NEXT: vcvtps2pd (%ecx), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x01]
@@ -77,7 +77,7 @@ define void @fpext_frommem4(<4 x float>*
; X32-AVX-NEXT: retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_frommem4:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2pd (%ecx), %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5a,0x01]
@@ -86,7 +86,7 @@ define void @fpext_frommem4(<4 x float>*
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem4:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtps2pd (%rdi), %xmm0 # encoding: [0x0f,0x5a,0x07]
; X64-SSE-NEXT: cvtps2pd 8(%rdi), %xmm1 # encoding: [0x0f,0x5a,0x4f,0x08]
; X64-SSE-NEXT: movups %xmm1, 16(%rsi) # encoding: [0x0f,0x11,0x4e,0x10]
@@ -94,14 +94,14 @@ define void @fpext_frommem4(<4 x float>*
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_frommem4:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtps2pd (%rdi), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x07]
; X64-AVX-NEXT: vmovups %ymm0, (%rsi) # encoding: [0xc5,0xfc,0x11,0x06]
; X64-AVX-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_frommem4:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vcvtps2pd (%rdi), %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5a,0x07]
; X64-AVX512VL-NEXT: vmovups %ymm0, (%rsi) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x06]
; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
@@ -115,7 +115,7 @@ entry:
define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; X32-SSE-LABEL: fpext_frommem8:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-SSE-NEXT: cvtps2pd (%ecx), %xmm0 # encoding: [0x0f,0x5a,0x01]
@@ -129,7 +129,7 @@ define void @fpext_frommem8(<8 x float>*
; X32-SSE-NEXT: retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_frommem8:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX-NEXT: vcvtps2pd (%ecx), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x01]
@@ -140,7 +140,7 @@ define void @fpext_frommem8(<8 x float>*
; X32-AVX-NEXT: retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_frommem8:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT: vcvtps2pd (%ecx), %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x5a,0x01]
@@ -149,7 +149,7 @@ define void @fpext_frommem8(<8 x float>*
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem8:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtps2pd (%rdi), %xmm0 # encoding: [0x0f,0x5a,0x07]
; X64-SSE-NEXT: cvtps2pd 8(%rdi), %xmm1 # encoding: [0x0f,0x5a,0x4f,0x08]
; X64-SSE-NEXT: cvtps2pd 16(%rdi), %xmm2 # encoding: [0x0f,0x5a,0x57,0x10]
@@ -161,7 +161,7 @@ define void @fpext_frommem8(<8 x float>*
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_frommem8:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtps2pd (%rdi), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x07]
; X64-AVX-NEXT: vcvtps2pd 16(%rdi), %ymm1 # encoding: [0xc5,0xfc,0x5a,0x4f,0x10]
; X64-AVX-NEXT: vmovups %ymm1, 32(%rsi) # encoding: [0xc5,0xfc,0x11,0x4e,0x20]
@@ -170,7 +170,7 @@ define void @fpext_frommem8(<8 x float>*
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_frommem8:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vcvtps2pd (%rdi), %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x5a,0x07]
; X64-AVX512VL-NEXT: vmovups %zmm0, (%rsi) # encoding: [0x62,0xf1,0x7c,0x48,0x11,0x06]
; X64-AVX512VL-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
@@ -184,42 +184,42 @@ entry:
define <2 x double> @fpext_fromconst() {
; X32-SSE-LABEL: fpext_fromconst:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X32-SSE-NEXT: # encoding: [0x0f,0x28,0x05,A,A,A,A]
; X32-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI.*}}, kind: FK_Data_4
; X32-SSE-NEXT: retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_fromconst:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X32-AVX-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X32-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
; X32-AVX-NEXT: retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_fromconst:
-; X32-AVX512VL: # BB#0: # %entry
+; X32-AVX512VL: # %bb.0: # %entry
; X32-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [1.000000e+00,-2.000000e+00]
; X32-AVX512VL-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X32-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_fromconst:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X64-SSE-NEXT: # encoding: [0x0f,0x28,0x05,A,A,A,A]
; X64-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_fromconst:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X64-AVX-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_fromconst:
-; X64-AVX512VL: # BB#0: # %entry
+; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [1.000000e+00,-2.000000e+00]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
Modified: llvm/trunk/test/CodeGen/X86/vec_fptrunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_fptrunc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_fptrunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_fptrunc.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define void @fptrunc_frommem2(<2 x double>* %in, <2 x float>* %out) {
; X32-SSE-LABEL: fptrunc_frommem2:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT: cvtpd2ps (%ecx), %xmm0
@@ -15,7 +15,7 @@ define void @fptrunc_frommem2(<2 x doubl
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_frommem2:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vcvtpd2psx (%ecx), %xmm0
@@ -24,13 +24,13 @@ define void @fptrunc_frommem2(<2 x doubl
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_frommem2:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtpd2ps (%rdi), %xmm0
; X64-SSE-NEXT: movlpd %xmm0, (%rsi)
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_frommem2:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtpd2psx (%rdi), %xmm0
; X64-AVX-NEXT: vmovlpd %xmm0, (%rsi)
; X64-AVX-NEXT: retq
@@ -43,7 +43,7 @@ entry:
define void @fptrunc_frommem4(<4 x double>* %in, <4 x float>* %out) {
; X32-SSE-LABEL: fptrunc_frommem4:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT: cvtpd2ps 16(%ecx), %xmm0
@@ -53,7 +53,7 @@ define void @fptrunc_frommem4(<4 x doubl
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_frommem4:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vcvtpd2psy (%ecx), %xmm0
@@ -61,7 +61,7 @@ define void @fptrunc_frommem4(<4 x doubl
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_frommem4:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtpd2ps 16(%rdi), %xmm0
; X64-SSE-NEXT: cvtpd2ps (%rdi), %xmm1
; X64-SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -69,7 +69,7 @@ define void @fptrunc_frommem4(<4 x doubl
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_frommem4:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtpd2psy (%rdi), %xmm0
; X64-AVX-NEXT: vmovupd %xmm0, (%rsi)
; X64-AVX-NEXT: retq
@@ -82,7 +82,7 @@ entry:
define void @fptrunc_frommem8(<8 x double>* %in, <8 x float>* %out) {
; X32-SSE-LABEL: fptrunc_frommem8:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT: cvtpd2ps 16(%ecx), %xmm0
@@ -96,7 +96,7 @@ define void @fptrunc_frommem8(<8 x doubl
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_frommem8:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vcvtpd2psy (%ecx), %xmm0
@@ -107,7 +107,7 @@ define void @fptrunc_frommem8(<8 x doubl
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_frommem8:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtpd2ps 16(%rdi), %xmm0
; X64-SSE-NEXT: cvtpd2ps (%rdi), %xmm1
; X64-SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -119,7 +119,7 @@ define void @fptrunc_frommem8(<8 x doubl
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_frommem8:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtpd2psy (%rdi), %xmm0
; X64-AVX-NEXT: vcvtpd2psy 32(%rdi), %xmm1
; X64-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -135,24 +135,24 @@ entry:
define <4 x float> @fptrunc_frommem2_zext(<2 x double> * %ld) {
; X32-SSE-LABEL: fptrunc_frommem2_zext:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: cvtpd2ps (%eax), %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_frommem2_zext:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vcvtpd2psx (%eax), %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_frommem2_zext:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtpd2ps (%rdi), %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_frommem2_zext:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtpd2psx (%rdi), %xmm0
; X64-AVX-NEXT: retq
%arg = load <2 x double>, <2 x double> * %ld, align 16
@@ -163,22 +163,22 @@ define <4 x float> @fptrunc_frommem2_zex
define <4 x float> @fptrunc_fromreg2_zext(<2 x double> %arg) {
; X32-SSE-LABEL: fptrunc_fromreg2_zext:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: cvtpd2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_fromreg2_zext:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vcvtpd2ps %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_fromreg2_zext:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtpd2ps %xmm0, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_fromreg2_zext:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtpd2ps %xmm0, %xmm0
; X64-AVX-NEXT: retq
%cvt = fptrunc <2 x double> %arg to <2 x float>
@@ -189,26 +189,26 @@ define <4 x float> @fptrunc_fromreg2_zex
; FIXME: For exact truncations we should be able to fold this.
define <4 x float> @fptrunc_fromconst() {
; X32-SSE-LABEL: fptrunc_fromconst:
-; X32-SSE: # BB#0: # %entry
+; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: cvtpd2ps {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: cvtpd2ps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_fromconst:
-; X32-AVX: # BB#0: # %entry
+; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: vcvtpd2psy {{\.LCPI.*}}, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_fromconst:
-; X64-SSE: # BB#0: # %entry
+; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtpd2ps {{.*}}(%rip), %xmm1
; X64-SSE-NEXT: cvtpd2ps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fptrunc_fromconst:
-; X64-AVX: # BB#0: # %entry
+; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtpd2psy {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/vec_i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_i64.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_i64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_i64.ll Mon Dec 4 09:18:51 2017
@@ -6,13 +6,13 @@
define <2 x i64> @foo1(i64* %y) nounwind {
; X32-LABEL: foo1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: retl
;
; X64-LABEL: foo1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: retq
entry:
@@ -25,13 +25,13 @@ entry:
define <4 x float> @foo2(i64* %p) nounwind {
; X32-LABEL: foo2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: retl
;
; X64-LABEL: foo2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/vec_ins_extract-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_ins_extract-1.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_ins_extract-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_ins_extract-1.ll Mon Dec 4 09:18:51 2017
@@ -7,7 +7,7 @@
define i32 @t0(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-LABEL: t0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -21,7 +21,7 @@ define i32 @t0(i32 inreg %t7, <4 x i32>
; X32-NEXT: retl
;
; X64-LABEL: t0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
@@ -35,7 +35,7 @@ define i32 @t0(i32 inreg %t7, <4 x i32>
define i32 @t1(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -50,7 +50,7 @@ define i32 @t1(i32 inreg %t7, <4 x i32>
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movl $76, %eax
; X64-NEXT: pinsrd $0, %eax, %xmm0
@@ -65,7 +65,7 @@ define i32 @t1(i32 inreg %t7, <4 x i32>
define <4 x i32> @t2(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -78,7 +78,7 @@ define <4 x i32> @t2(i32 inreg %t7, <4 x
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
@@ -91,7 +91,7 @@ define <4 x i32> @t2(i32 inreg %t7, <4 x
define <4 x i32> @t3(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -105,7 +105,7 @@ define <4 x i32> @t3(i32 inreg %t7, <4 x
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
Modified: llvm/trunk/test/CodeGen/X86/vec_insert-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_insert-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_insert-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_insert-2.ll Mon Dec 4 09:18:51 2017
@@ -4,14 +4,14 @@
define <4 x float> @t1(float %s, <4 x float> %tmp) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,0]
; X64-NEXT: movaps %xmm1, %xmm0
@@ -22,14 +22,14 @@ define <4 x float> @t1(float %s, <4 x fl
define <4 x i32> @t2(i32 %s, <4 x i32> %tmp) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm1
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
@@ -40,12 +40,12 @@ define <4 x i32> @t2(i32 %s, <4 x i32> %
define <2 x double> @t3(double %s, <2 x double> %tmp) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -55,12 +55,12 @@ define <2 x double> @t3(double %s, <2 x
define <8 x i16> @t4(i16 %s, <8 x i16> %tmp) nounwind {
; X32-LABEL: t4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pinsrw $5, {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pinsrw $5, %edi, %xmm0
; X64-NEXT: retq
%tmp1 = insertelement <8 x i16> %tmp, i16 %s, i32 5
Modified: llvm/trunk/test/CodeGen/X86/vec_insert-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_insert-3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_insert-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_insert-3.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define <2 x i64> @t1(i64 %s, <2 x i64> %tmp) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
@@ -14,7 +14,7 @@ define <2 x i64> @t1(i64 %s, <2 x i64> %
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %xmm1
; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vec_insert-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_insert-4.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_insert-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_insert-4.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define <8 x float> @f(<8 x float> %a, i32 %b) nounwind {
; X32-LABEL: f:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-32, %esp
@@ -21,7 +21,7 @@ define <8 x float> @f(<8 x float> %a, i3
; X32-NEXT: retl
;
; X64-LABEL: f:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: pushq %rbp
; X64-NEXT: movq %rsp, %rbp
; X64-NEXT: andq $-32, %rsp
Modified: llvm/trunk/test/CodeGen/X86/vec_insert-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_insert-5.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_insert-5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_insert-5.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define void @t1(i32 %a, x86_mmx* %P) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: shll $12, %ecx
@@ -16,7 +16,7 @@ define void @t1(i32 %a, x86_mmx* %P) no
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shll $12, %edi
; X64-NEXT: movq %rdi, %xmm0
@@ -34,7 +34,7 @@ define void @t1(i32 %a, x86_mmx* %P) no
define <4 x float> @t2(<4 x float>* %P) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm1
; X32-NEXT: xorps %xmm0, %xmm0
@@ -43,7 +43,7 @@ define <4 x float> @t2(<4 x float>* %P)
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rdi), %xmm1
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
@@ -56,14 +56,14 @@ define <4 x float> @t2(<4 x float>* %P)
define <4 x float> @t3(<4 x float>* %P) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; X64-NEXT: retq
@@ -74,7 +74,7 @@ define <4 x float> @t3(<4 x float>* %P)
define <4 x float> @t4(<4 x float>* %P) nounwind {
; X32-LABEL: t4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm0
; X32-NEXT: xorps %xmm1, %xmm1
@@ -83,7 +83,7 @@ define <4 x float> @t4(<4 x float>* %P)
; X32-NEXT: retl
;
; X64-LABEL: t4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[1,0]
@@ -96,12 +96,12 @@ define <4 x float> @t4(<4 x float>* %P)
define <16 x i8> @t5(<16 x i8> %x) nounwind {
; X32-LABEL: t5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrlw $8, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrlw $8, %xmm0
; X64-NEXT: retq
%s = shufflevector <16 x i8> %x, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 17>
@@ -110,12 +110,12 @@ define <16 x i8> @t5(<16 x i8> %x) nounw
define <16 x i8> @t6(<16 x i8> %x) nounwind {
; X32-LABEL: t6:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrlw $8, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrlw $8, %xmm0
; X64-NEXT: retq
%s = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -124,12 +124,12 @@ define <16 x i8> @t6(<16 x i8> %x) nounw
define <16 x i8> @t7(<16 x i8> %x) nounwind {
; X32-LABEL: t7:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2]
; X32-NEXT: retl
;
; X64-LABEL: t7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2]
; X64-NEXT: retq
%s = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 2>
@@ -138,12 +138,12 @@ define <16 x i8> @t7(<16 x i8> %x) nounw
define <16 x i8> @t8(<16 x i8> %x) nounwind {
; X32-LABEL: t8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; X32-NEXT: retl
;
; X64-LABEL: t8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; X64-NEXT: retq
%s = shufflevector <16 x i8> %x, <16 x i8> zeroinitializer, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 17>
@@ -152,12 +152,12 @@ define <16 x i8> @t8(<16 x i8> %x) nounw
define <16 x i8> @t9(<16 x i8> %x) nounwind {
; X32-LABEL: t9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; X32-NEXT: retl
;
; X64-LABEL: t9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; X64-NEXT: retq
%s = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 7, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 14, i32 undef, i32 undef>
Modified: llvm/trunk/test/CodeGen/X86/vec_insert-7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_insert-7.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_insert-7.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_insert-7.ll Mon Dec 4 09:18:51 2017
@@ -7,7 +7,7 @@
define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
; X32-LABEL: mmx_movzl:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: subl $20, %esp
; X32-NEXT: movq %mm0, {{[0-9]+}}(%esp)
; X32-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
@@ -21,7 +21,7 @@ define x86_mmx @mmx_movzl(x86_mmx %x) no
; X32-NEXT: retl
;
; X64-LABEL: mmx_movzl:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movdq2q %xmm0, %mm0
; X64-NEXT: movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
Modified: llvm/trunk/test/CodeGen/X86/vec_insert-8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_insert-8.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_insert-8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_insert-8.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define <4 x i32> @var_insert(<4 x i32> %x, i32 %val, i32 %idx) nounwind {
; X32-LABEL: var_insert:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -22,7 +22,7 @@ define <4 x i32> @var_insert(<4 x i32> %
; X32-NEXT: retl
;
; X64-LABEL: var_insert:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %esi
@@ -36,7 +36,7 @@ entry:
define i32 @var_extract(<4 x i32> %x, i32 %idx) nounwind {
; X32-LABEL: var_extract:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-16, %esp
@@ -50,7 +50,7 @@ define i32 @var_extract(<4 x i32> %x, i3
; X32-NEXT: retl
;
; X64-LABEL: var_extract:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
Modified: llvm/trunk/test/CodeGen/X86/vec_insert-9.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_insert-9.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_insert-9.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_insert-9.ll Mon Dec 4 09:18:51 2017
@@ -4,13 +4,13 @@
define <4 x i32> @var_insert2(<4 x i32> %x, i32 %val, i32 %idx) nounwind {
; X32-LABEL: var_insert2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: var_insert2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: pinsrd $3, %esi, %xmm0
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vec_insert-mmx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_insert-mmx.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_insert-mmx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_insert-mmx.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@
; This is not an MMX operation; promoted to xmm.
define x86_mmx @t0(i32 %A) nounwind {
; X32-LABEL: t0:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: subl $12, %esp
; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
@@ -15,7 +15,7 @@ define x86_mmx @t0(i32 %A) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: t0:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
@@ -28,12 +28,12 @@ define x86_mmx @t0(i32 %A) nounwind {
define <8 x i8> @t1(i8 zeroext %x) nounwind {
; X32-LABEL: t1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: retq
%r = insertelement <8 x i8> undef, i8 %x, i32 0
@@ -43,12 +43,12 @@ define <8 x i8> @t1(i8 zeroext %x) nounw
; PR2574
define <2 x float> @t2(<2 x float> %a0) {
; X32-LABEL: t2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: retq
%v1 = insertelement <2 x float> %a0, float 0.000000e+00, i32 0
@@ -62,7 +62,7 @@ define <2 x float> @t2(<2 x float> %a0)
; PR2562
define void @t3() {
; X32-LABEL: t3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl L_g0$non_lazy_ptr, %eax
; X32-NEXT: movl L_g1$non_lazy_ptr, %ecx
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
@@ -77,7 +77,7 @@ define void @t3() {
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: movq _g0@{{.*}}(%rip), %rax
; X64-NEXT: movq _g1@{{.*}}(%rip), %rcx
; X64-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
Modified: llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll Mon Dec 4 09:18:51 2017
@@ -18,7 +18,7 @@
define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
; SSE-LABEL: sitofp_2i64_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: cvtsi2sdq %rax, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -30,7 +30,7 @@ define <2 x double> @sitofp_2i64_to_2f64
; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_2i64_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
; VEX-NEXT: vmovq %xmm0, %rax
@@ -39,7 +39,7 @@ define <2 x double> @sitofp_2i64_to_2f64
; VEX-NEXT: retq
;
; AVX512F-LABEL: sitofp_2i64_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -48,7 +48,7 @@ define <2 x double> @sitofp_2i64_to_2f64
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_2i64_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -57,7 +57,7 @@ define <2 x double> @sitofp_2i64_to_2f64
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_2i64_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -65,7 +65,7 @@ define <2 x double> @sitofp_2i64_to_2f64
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_2i64_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2pd %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = sitofp <2 x i64> %a to <2 x double>
@@ -74,12 +74,12 @@ define <2 x double> @sitofp_2i64_to_2f64
define <2 x double> @sitofp_2i32_to_2f64(<4 x i32> %a) {
; SSE-LABEL: sitofp_2i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_2i32_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
%shuf = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
@@ -89,12 +89,12 @@ define <2 x double> @sitofp_2i32_to_2f64
define <2 x double> @sitofp_4i32_to_2f64(<4 x i32> %a) {
; SSE-LABEL: sitofp_4i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i32_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX-NEXT: vzeroupper
@@ -106,14 +106,14 @@ define <2 x double> @sitofp_4i32_to_2f64
define <2 x double> @sitofp_2i16_to_2f64(<8 x i16> %a) {
; SSE-LABEL: sitofp_2i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_2i16_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
@@ -124,14 +124,14 @@ define <2 x double> @sitofp_2i16_to_2f64
define <2 x double> @sitofp_8i16_to_2f64(<8 x i16> %a) {
; SSE-LABEL: sitofp_8i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_8i16_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -139,7 +139,7 @@ define <2 x double> @sitofp_8i16_to_2f64
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_8i16_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -147,7 +147,7 @@ define <2 x double> @sitofp_8i16_to_2f64
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_8i16_to_2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -160,7 +160,7 @@ define <2 x double> @sitofp_8i16_to_2f64
define <2 x double> @sitofp_2i8_to_2f64(<16 x i8> %a) {
; SSE-LABEL: sitofp_2i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm0
@@ -168,7 +168,7 @@ define <2 x double> @sitofp_2i8_to_2f64(
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_2i8_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
@@ -179,7 +179,7 @@ define <2 x double> @sitofp_2i8_to_2f64(
define <2 x double> @sitofp_16i8_to_2f64(<16 x i8> %a) {
; SSE-LABEL: sitofp_16i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm0
@@ -187,7 +187,7 @@ define <2 x double> @sitofp_16i8_to_2f64
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_16i8_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -195,7 +195,7 @@ define <2 x double> @sitofp_16i8_to_2f64
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_16i8_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -203,7 +203,7 @@ define <2 x double> @sitofp_16i8_to_2f64
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_16i8_to_2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -216,7 +216,7 @@ define <2 x double> @sitofp_16i8_to_2f64
define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) {
; SSE-LABEL: sitofp_4i64_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: cvtsi2sdq %rax, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -236,7 +236,7 @@ define <4 x double> @sitofp_4i64_to_4f64
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_4i64_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
; AVX1-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -252,7 +252,7 @@ define <4 x double> @sitofp_4i64_to_4f64
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_4i64_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
; AVX2-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -268,7 +268,7 @@ define <4 x double> @sitofp_4i64_to_4f64
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sitofp_4i64_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
; AVX512F-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -284,7 +284,7 @@ define <4 x double> @sitofp_4i64_to_4f64
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_4i64_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
; AVX512VL-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -300,14 +300,14 @@ define <4 x double> @sitofp_4i64_to_4f64
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2pd %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = sitofp <4 x i64> %a to <4 x double>
@@ -316,7 +316,7 @@ define <4 x double> @sitofp_4i64_to_4f64
define <4 x double> @sitofp_4i32_to_4f64(<4 x i32> %a) {
; SSE-LABEL: sitofp_4i32_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2pd %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm1
@@ -324,7 +324,7 @@ define <4 x double> @sitofp_4i32_to_4f64
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i32_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
%cvt = sitofp <4 x i32> %a to <4 x double>
@@ -333,7 +333,7 @@ define <4 x double> @sitofp_4i32_to_4f64
define <4 x double> @sitofp_4i16_to_4f64(<8 x i16> %a) {
; SSE-LABEL: sitofp_4i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm0
@@ -342,7 +342,7 @@ define <4 x double> @sitofp_4i16_to_4f64
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i16_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -353,7 +353,7 @@ define <4 x double> @sitofp_4i16_to_4f64
define <4 x double> @sitofp_8i16_to_4f64(<8 x i16> %a) {
; SSE-LABEL: sitofp_8i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm0
@@ -362,19 +362,19 @@ define <4 x double> @sitofp_8i16_to_4f64
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_8i16_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_8i16_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_8i16_to_4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -386,7 +386,7 @@ define <4 x double> @sitofp_8i16_to_4f64
define <4 x double> @sitofp_4i8_to_4f64(<16 x i8> %a) {
; SSE-LABEL: sitofp_4i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $24, %xmm1
@@ -396,7 +396,7 @@ define <4 x double> @sitofp_4i8_to_4f64(
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i8_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -407,7 +407,7 @@ define <4 x double> @sitofp_4i8_to_4f64(
define <4 x double> @sitofp_16i8_to_4f64(<16 x i8> %a) {
; SSE-LABEL: sitofp_16i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $24, %xmm1
@@ -417,19 +417,19 @@ define <4 x double> @sitofp_16i8_to_4f64
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_16i8_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_16i8_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_16i8_to_4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -445,7 +445,7 @@ define <4 x double> @sitofp_16i8_to_4f64
define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
; SSE-LABEL: uitofp_2i64_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -461,7 +461,7 @@ define <2 x double> @uitofp_2i64_to_2f64
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_2i64_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovapd {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
; VEX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; VEX-NEXT: vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
@@ -473,7 +473,7 @@ define <2 x double> @uitofp_2i64_to_2f64
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_2i64_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2sdq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -482,7 +482,7 @@ define <2 x double> @uitofp_2i64_to_2f64
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_2i64_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2sdq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -491,7 +491,7 @@ define <2 x double> @uitofp_2i64_to_2f64
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -499,7 +499,7 @@ define <2 x double> @uitofp_2i64_to_2f64
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i64_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2pd %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <2 x i64> %a to <2 x double>
@@ -508,7 +508,7 @@ define <2 x double> @uitofp_2i64_to_2f64
define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
; SSE-LABEL: uitofp_2i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm1
@@ -519,7 +519,7 @@ define <2 x double> @uitofp_2i32_to_2f64
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_2i32_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VEX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; VEX-NEXT: vcvtdq2pd %xmm1, %xmm1
@@ -530,7 +530,7 @@ define <2 x double> @uitofp_2i32_to_2f64
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_2i32_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -538,12 +538,12 @@ define <2 x double> @uitofp_2i32_to_2f64
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_2i32_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_2i32_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -551,7 +551,7 @@ define <2 x double> @uitofp_2i32_to_2f64
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i32_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%shuf = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
@@ -561,7 +561,7 @@ define <2 x double> @uitofp_2i32_to_2f64
define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; SSE-LABEL: uitofp_4i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm1
@@ -572,7 +572,7 @@ define <2 x double> @uitofp_4i32_to_2f64
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_4i32_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
@@ -585,7 +585,7 @@ define <2 x double> @uitofp_4i32_to_2f64
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_4i32_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vcvtdq2pd %xmm1, %ymm1
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [65536,65536,65536,65536]
@@ -599,7 +599,7 @@ define <2 x double> @uitofp_4i32_to_2f64
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i32_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -607,14 +607,14 @@ define <2 x double> @uitofp_4i32_to_2f64
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %ymm0
; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i32_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -622,7 +622,7 @@ define <2 x double> @uitofp_4i32_to_2f64
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %ymm0
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VLDQ-NEXT: vzeroupper
@@ -634,14 +634,14 @@ define <2 x double> @uitofp_4i32_to_2f64
define <2 x double> @uitofp_2i16_to_2f64(<8 x i16> %a) {
; SSE-LABEL: uitofp_2i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_2i16_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
@@ -652,14 +652,14 @@ define <2 x double> @uitofp_2i16_to_2f64
define <2 x double> @uitofp_8i16_to_2f64(<8 x i16> %a) {
; SSE-LABEL: uitofp_8i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i16_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -667,7 +667,7 @@ define <2 x double> @uitofp_8i16_to_2f64
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i16_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -675,7 +675,7 @@ define <2 x double> @uitofp_8i16_to_2f64
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_8i16_to_2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -688,7 +688,7 @@ define <2 x double> @uitofp_8i16_to_2f64
define <2 x double> @uitofp_2i8_to_2f64(<16 x i8> %a) {
; SSE-LABEL: uitofp_2i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -696,7 +696,7 @@ define <2 x double> @uitofp_2i8_to_2f64(
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_2i8_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: retq
@@ -707,7 +707,7 @@ define <2 x double> @uitofp_2i8_to_2f64(
define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) {
; SSE-LABEL: uitofp_16i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -715,7 +715,7 @@ define <2 x double> @uitofp_16i8_to_2f64
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_16i8_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -723,7 +723,7 @@ define <2 x double> @uitofp_16i8_to_2f64
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_16i8_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -731,7 +731,7 @@ define <2 x double> @uitofp_16i8_to_2f64
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_16i8_to_2f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -744,7 +744,7 @@ define <2 x double> @uitofp_16i8_to_2f64
define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) {
; SSE-LABEL: uitofp_4i64_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
@@ -770,7 +770,7 @@ define <4 x double> @uitofp_4i64_to_4f64
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_4i64_to_4f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vextractf128 $1, %ymm0, %xmm1
; VEX-NEXT: vmovapd {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
; VEX-NEXT: vunpcklps {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
@@ -790,7 +790,7 @@ define <4 x double> @uitofp_4i64_to_4f64
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i64_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
; AVX512F-NEXT: vcvtusi2sdq %rax, %xmm2, %xmm2
@@ -806,7 +806,7 @@ define <4 x double> @uitofp_4i64_to_4f64
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i64_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
; AVX512VL-NEXT: vcvtusi2sdq %rax, %xmm2, %xmm2
@@ -822,14 +822,14 @@ define <4 x double> @uitofp_4i64_to_4f64
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2pd %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i64> %a to <4 x double>
@@ -838,7 +838,7 @@ define <4 x double> @uitofp_4i64_to_4f64
define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
; SSE-LABEL: uitofp_4i32_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm1
@@ -859,7 +859,7 @@ define <4 x double> @uitofp_4i32_to_4f64
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_4i32_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
@@ -870,7 +870,7 @@ define <4 x double> @uitofp_4i32_to_4f64
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_4i32_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vcvtdq2pd %xmm1, %ymm1
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [65536,65536,65536,65536]
@@ -882,26 +882,26 @@ define <4 x double> @uitofp_4i32_to_4f64
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i32_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i32> %a to <4 x double>
@@ -910,7 +910,7 @@ define <4 x double> @uitofp_4i32_to_4f64
define <4 x double> @uitofp_4i16_to_4f64(<8 x i16> %a) {
; SSE-LABEL: uitofp_4i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm2
@@ -920,7 +920,7 @@ define <4 x double> @uitofp_4i16_to_4f64
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_4i16_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -931,7 +931,7 @@ define <4 x double> @uitofp_4i16_to_4f64
define <4 x double> @uitofp_8i16_to_4f64(<8 x i16> %a) {
; SSE-LABEL: uitofp_8i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm2
@@ -941,19 +941,19 @@ define <4 x double> @uitofp_8i16_to_4f64
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i16_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i16_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_8i16_to_4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -965,7 +965,7 @@ define <4 x double> @uitofp_8i16_to_4f64
define <4 x double> @uitofp_4i8_to_4f64(<16 x i8> %a) {
; SSE-LABEL: uitofp_4i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -976,7 +976,7 @@ define <4 x double> @uitofp_4i8_to_4f64(
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_4i8_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -987,7 +987,7 @@ define <4 x double> @uitofp_4i8_to_4f64(
define <4 x double> @uitofp_16i8_to_4f64(<16 x i8> %a) {
; SSE-LABEL: uitofp_16i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -998,19 +998,19 @@ define <4 x double> @uitofp_16i8_to_4f64
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_16i8_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_16i8_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_16i8_to_4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -1026,7 +1026,7 @@ define <4 x double> @uitofp_16i8_to_4f64
define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
; SSE-LABEL: sitofp_2i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1038,7 +1038,7 @@ define <4 x float> @sitofp_2i64_to_4f32(
; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_2i64_to_4f32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: vmovq %xmm0, %rax
@@ -1049,7 +1049,7 @@ define <4 x float> @sitofp_2i64_to_4f32(
; VEX-NEXT: retq
;
; AVX512F-LABEL: sitofp_2i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1060,7 +1060,7 @@ define <4 x float> @sitofp_2i64_to_4f32(
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_2i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1071,7 +1071,7 @@ define <4 x float> @sitofp_2i64_to_4f32(
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_2i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1079,7 +1079,7 @@ define <4 x float> @sitofp_2i64_to_4f32(
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_2i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2ps %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = sitofp <2 x i64> %a to <2 x float>
@@ -1089,7 +1089,7 @@ define <4 x float> @sitofp_2i64_to_4f32(
define <4 x float> @sitofp_2i64_to_4f32_zero(<2 x i64> %a) {
; SSE-LABEL: sitofp_2i64_to_4f32_zero:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: xorps %xmm1, %xmm1
@@ -1102,7 +1102,7 @@ define <4 x float> @sitofp_2i64_to_4f32_
; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_2i64_to_4f32_zero:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: vmovq %xmm0, %rax
@@ -1111,7 +1111,7 @@ define <4 x float> @sitofp_2i64_to_4f32_
; VEX-NEXT: retq
;
; AVX512F-LABEL: sitofp_2i64_to_4f32_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1120,7 +1120,7 @@ define <4 x float> @sitofp_2i64_to_4f32_
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_2i64_to_4f32_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1130,7 +1130,7 @@ define <4 x float> @sitofp_2i64_to_4f32_
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_2i64_to_4f32_zero:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
@@ -1138,7 +1138,7 @@ define <4 x float> @sitofp_2i64_to_4f32_
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_2i64_to_4f32_zero:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2ps %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = sitofp <2 x i64> %a to <2 x float>
@@ -1148,7 +1148,7 @@ define <4 x float> @sitofp_2i64_to_4f32_
define <4 x float> @sitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; SSE-LABEL: sitofp_4i64_to_4f32_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -1163,7 +1163,7 @@ define <4 x float> @sitofp_4i64_to_4f32_
; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_4i64_to_4f32_undef:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: vmovq %xmm0, %rax
@@ -1174,7 +1174,7 @@ define <4 x float> @sitofp_4i64_to_4f32_
; VEX-NEXT: retq
;
; AVX512F-LABEL: sitofp_4i64_to_4f32_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1185,7 +1185,7 @@ define <4 x float> @sitofp_4i64_to_4f32_
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_4i64_to_4f32_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1196,7 +1196,7 @@ define <4 x float> @sitofp_4i64_to_4f32_
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32_undef:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1204,7 +1204,7 @@ define <4 x float> @sitofp_4i64_to_4f32_
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f32_undef:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
@@ -1216,12 +1216,12 @@ define <4 x float> @sitofp_4i64_to_4f32_
define <4 x float> @sitofp_4i32_to_4f32(<4 x i32> %a) {
; SSE-LABEL: sitofp_4i32_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i32_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = sitofp <4 x i32> %a to <4 x float>
@@ -1230,14 +1230,14 @@ define <4 x float> @sitofp_4i32_to_4f32(
define <4 x float> @sitofp_4i16_to_4f32(<8 x i16> %a) {
; SSE-LABEL: sitofp_4i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i16_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -1248,14 +1248,14 @@ define <4 x float> @sitofp_4i16_to_4f32(
define <4 x float> @sitofp_8i16_to_4f32(<8 x i16> %a) {
; SSE-LABEL: sitofp_8i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_8i16_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
@@ -1266,7 +1266,7 @@ define <4 x float> @sitofp_8i16_to_4f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_8i16_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1274,7 +1274,7 @@ define <4 x float> @sitofp_8i16_to_4f32(
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_8i16_to_4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1287,7 +1287,7 @@ define <4 x float> @sitofp_8i16_to_4f32(
define <4 x float> @sitofp_4i8_to_4f32(<16 x i8> %a) {
; SSE-LABEL: sitofp_4i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm0
@@ -1295,7 +1295,7 @@ define <4 x float> @sitofp_4i8_to_4f32(<
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_4i8_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -1306,7 +1306,7 @@ define <4 x float> @sitofp_4i8_to_4f32(<
define <4 x float> @sitofp_16i8_to_4f32(<16 x i8> %a) {
; SSE-LABEL: sitofp_16i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm0
@@ -1314,7 +1314,7 @@ define <4 x float> @sitofp_16i8_to_4f32(
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_16i8_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
@@ -1325,7 +1325,7 @@ define <4 x float> @sitofp_16i8_to_4f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_16i8_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1333,7 +1333,7 @@ define <4 x float> @sitofp_16i8_to_4f32(
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_16i8_to_4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1346,7 +1346,7 @@ define <4 x float> @sitofp_16i8_to_4f32(
define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
; SSE-LABEL: sitofp_4i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: cvtsi2ssq %rax, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -1367,7 +1367,7 @@ define <4 x float> @sitofp_4i64_to_4f32(
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_4i64_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX1-NEXT: vmovq %xmm0, %rax
@@ -1384,7 +1384,7 @@ define <4 x float> @sitofp_4i64_to_4f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_4i64_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX2-NEXT: vmovq %xmm0, %rax
@@ -1401,7 +1401,7 @@ define <4 x float> @sitofp_4i64_to_4f32(
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sitofp_4i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1418,7 +1418,7 @@ define <4 x float> @sitofp_4i64_to_4f32(
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_4i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1435,7 +1435,7 @@ define <4 x float> @sitofp_4i64_to_4f32(
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1443,7 +1443,7 @@ define <4 x float> @sitofp_4i64_to_4f32(
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -1453,13 +1453,13 @@ define <4 x float> @sitofp_4i64_to_4f32(
define <8 x float> @sitofp_8i32_to_8f32(<8 x i32> %a) {
; SSE-LABEL: sitofp_8i32_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_8i32_to_8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX-NEXT: retq
%cvt = sitofp <8 x i32> %a to <8 x float>
@@ -1468,7 +1468,7 @@ define <8 x float> @sitofp_8i32_to_8f32(
define <8 x float> @sitofp_8i16_to_8f32(<8 x i16> %a) {
; SSE-LABEL: sitofp_8i16_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: cvtdq2ps %xmm1, %xmm2
@@ -1479,7 +1479,7 @@ define <8 x float> @sitofp_8i16_to_8f32(
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_8i16_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
@@ -1488,13 +1488,13 @@ define <8 x float> @sitofp_8i16_to_8f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_8i16_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_8i16_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -1504,7 +1504,7 @@ define <8 x float> @sitofp_8i16_to_8f32(
define <8 x float> @sitofp_8i8_to_8f32(<16 x i8> %a) {
; SSE-LABEL: sitofp_8i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm1
@@ -1518,7 +1518,7 @@ define <8 x float> @sitofp_8i8_to_8f32(<
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_8i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
@@ -1527,13 +1527,13 @@ define <8 x float> @sitofp_8i8_to_8f32(<
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_8i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_8i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -1544,7 +1544,7 @@ define <8 x float> @sitofp_8i8_to_8f32(<
define <8 x float> @sitofp_16i8_to_8f32(<16 x i8> %a) {
; SSE-LABEL: sitofp_16i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm1
@@ -1558,7 +1558,7 @@ define <8 x float> @sitofp_16i8_to_8f32(
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_16i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
@@ -1567,13 +1567,13 @@ define <8 x float> @sitofp_16i8_to_8f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_16i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_16i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -1589,12 +1589,12 @@ define <8 x float> @sitofp_16i8_to_8f32(
define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
; SSE-LABEL: uitofp_2i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB39_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB39_3
@@ -1611,7 +1611,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB39_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -1628,11 +1628,11 @@ define <4 x float> @uitofp_2i64_to_4f32(
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_2i64_to_4f32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB39_1
-; VEX-NEXT: # BB#2:
+; VEX-NEXT: # %bb.2:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: jmp .LBB39_3
; VEX-NEXT: .LBB39_1:
@@ -1646,7 +1646,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
; VEX-NEXT: vmovq %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB39_4
-; VEX-NEXT: # BB#5:
+; VEX-NEXT: # %bb.5:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0
; VEX-NEXT: jmp .LBB39_6
; VEX-NEXT: .LBB39_4:
@@ -1661,14 +1661,14 @@ define <4 x float> @uitofp_2i64_to_4f32(
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; VEX-NEXT: js .LBB39_8
-; VEX-NEXT: # BB#7:
+; VEX-NEXT: # %bb.7:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm1
; VEX-NEXT: .LBB39_8:
; VEX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_2i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1679,7 +1679,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_2i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1690,7 +1690,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_2i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1698,7 +1698,7 @@ define <4 x float> @uitofp_2i64_to_4f32(
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2ps %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <2 x i64> %a to <2 x float>
@@ -1708,12 +1708,12 @@ define <4 x float> @uitofp_2i64_to_4f32(
define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
; SSE-LABEL: uitofp_2i64_to_2f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB40_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: jmp .LBB40_3
@@ -1729,7 +1729,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB40_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB40_6
@@ -1747,11 +1747,11 @@ define <4 x float> @uitofp_2i64_to_2f32(
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_2i64_to_2f32:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB40_1
-; VEX-NEXT: # BB#2:
+; VEX-NEXT: # %bb.2:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: jmp .LBB40_3
; VEX-NEXT: .LBB40_1:
@@ -1765,7 +1765,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
; VEX-NEXT: vmovq %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB40_4
-; VEX-NEXT: # BB#5:
+; VEX-NEXT: # %bb.5:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0
; VEX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
; VEX-NEXT: retq
@@ -1780,7 +1780,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_2i64_to_2f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1789,7 +1789,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_2i64_to_2f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1799,7 +1799,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
@@ -1807,7 +1807,7 @@ define <4 x float> @uitofp_2i64_to_2f32(
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_2i64_to_2f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2ps %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <2 x i64> %a to <2 x float>
@@ -1817,12 +1817,12 @@ define <4 x float> @uitofp_2i64_to_2f32(
define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
; SSE-LABEL: uitofp_4i64_to_4f32_undef:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB41_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB41_3
@@ -1839,7 +1839,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB41_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: jmp .LBB41_6
@@ -1856,7 +1856,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: js .LBB41_8
-; SSE-NEXT: # BB#7:
+; SSE-NEXT: # %bb.7:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: .LBB41_8:
@@ -1864,11 +1864,11 @@ define <4 x float> @uitofp_4i64_to_4f32_
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_4i64_to_4f32_undef:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB41_1
-; VEX-NEXT: # BB#2:
+; VEX-NEXT: # %bb.2:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; VEX-NEXT: jmp .LBB41_3
; VEX-NEXT: .LBB41_1:
@@ -1882,7 +1882,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
; VEX-NEXT: vmovq %xmm0, %rax
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: js .LBB41_4
-; VEX-NEXT: # BB#5:
+; VEX-NEXT: # %bb.5:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0
; VEX-NEXT: jmp .LBB41_6
; VEX-NEXT: .LBB41_4:
@@ -1897,14 +1897,14 @@ define <4 x float> @uitofp_4i64_to_4f32_
; VEX-NEXT: testq %rax, %rax
; VEX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; VEX-NEXT: js .LBB41_8
-; VEX-NEXT: # BB#7:
+; VEX-NEXT: # %bb.7:
; VEX-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm1
; VEX-NEXT: .LBB41_8:
; VEX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i64_to_4f32_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -1915,7 +1915,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i64_to_4f32_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -1926,7 +1926,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32_undef:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -1934,7 +1934,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f32_undef:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
@@ -1946,7 +1946,7 @@ define <4 x float> @uitofp_4i64_to_4f32_
define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; SSE-LABEL: uitofp_4i32_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: por {{.*}}(%rip), %xmm1
@@ -1957,7 +1957,7 @@ define <4 x float> @uitofp_4i32_to_4f32(
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_4i32_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
@@ -1966,7 +1966,7 @@ define <4 x float> @uitofp_4i32_to_4f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_4i32_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -1978,7 +1978,7 @@ define <4 x float> @uitofp_4i32_to_4f32(
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i32_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1986,12 +1986,12 @@ define <4 x float> @uitofp_4i32_to_4f32(
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1999,7 +1999,7 @@ define <4 x float> @uitofp_4i32_to_4f32(
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2ps %xmm0, %xmm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i32> %a to <4 x float>
@@ -2008,14 +2008,14 @@ define <4 x float> @uitofp_4i32_to_4f32(
define <4 x float> @uitofp_4i16_to_4f32(<8 x i16> %a) {
; SSE-LABEL: uitofp_4i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_4i16_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -2026,14 +2026,14 @@ define <4 x float> @uitofp_4i16_to_4f32(
define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
; SSE-LABEL: uitofp_8i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i16_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -2044,7 +2044,7 @@ define <4 x float> @uitofp_8i16_to_4f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i16_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2052,7 +2052,7 @@ define <4 x float> @uitofp_8i16_to_4f32(
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_8i16_to_4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2065,7 +2065,7 @@ define <4 x float> @uitofp_8i16_to_4f32(
define <4 x float> @uitofp_4i8_to_4f32(<16 x i8> %a) {
; SSE-LABEL: uitofp_4i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -2073,7 +2073,7 @@ define <4 x float> @uitofp_4i8_to_4f32(<
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_4i8_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -2084,7 +2084,7 @@ define <4 x float> @uitofp_4i8_to_4f32(<
define <4 x float> @uitofp_16i8_to_4f32(<16 x i8> %a) {
; SSE-LABEL: uitofp_16i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -2092,7 +2092,7 @@ define <4 x float> @uitofp_16i8_to_4f32(
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_16i8_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -2103,7 +2103,7 @@ define <4 x float> @uitofp_16i8_to_4f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_16i8_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2111,7 +2111,7 @@ define <4 x float> @uitofp_16i8_to_4f32(
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_16i8_to_4f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2124,11 +2124,11 @@ define <4 x float> @uitofp_16i8_to_4f32(
define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
; SSE-LABEL: uitofp_4i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB47_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: cvtsi2ssq %rax, %xmm2
; SSE-NEXT: jmp .LBB47_3
; SSE-NEXT: .LBB47_1:
@@ -2143,7 +2143,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB47_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: cvtsi2ssq %rax, %xmm3
; SSE-NEXT: jmp .LBB47_6
; SSE-NEXT: .LBB47_4:
@@ -2157,7 +2157,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB47_7
-; SSE-NEXT: # BB#8:
+; SSE-NEXT: # %bb.8:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: jmp .LBB47_9
@@ -2175,7 +2175,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB47_10
-; SSE-NEXT: # BB#11:
+; SSE-NEXT: # %bb.11:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB47_12
@@ -2194,11 +2194,11 @@ define <4 x float> @uitofp_4i64_to_4f32(
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_4i64_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB47_1
-; AVX1-NEXT: # BB#2:
+; AVX1-NEXT: # %bb.2:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX1-NEXT: jmp .LBB47_3
; AVX1-NEXT: .LBB47_1:
@@ -2212,7 +2212,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB47_4
-; AVX1-NEXT: # BB#5:
+; AVX1-NEXT: # %bb.5:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
; AVX1-NEXT: jmp .LBB47_6
; AVX1-NEXT: .LBB47_4:
@@ -2228,7 +2228,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB47_7
-; AVX1-NEXT: # BB#8:
+; AVX1-NEXT: # %bb.8:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
; AVX1-NEXT: jmp .LBB47_9
; AVX1-NEXT: .LBB47_7:
@@ -2243,7 +2243,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB47_10
-; AVX1-NEXT: # BB#11:
+; AVX1-NEXT: # %bb.11:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vzeroupper
@@ -2260,11 +2260,11 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_4i64_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB47_1
-; AVX2-NEXT: # BB#2:
+; AVX2-NEXT: # %bb.2:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX2-NEXT: jmp .LBB47_3
; AVX2-NEXT: .LBB47_1:
@@ -2278,7 +2278,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB47_4
-; AVX2-NEXT: # BB#5:
+; AVX2-NEXT: # %bb.5:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
; AVX2-NEXT: jmp .LBB47_6
; AVX2-NEXT: .LBB47_4:
@@ -2294,7 +2294,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB47_7
-; AVX2-NEXT: # BB#8:
+; AVX2-NEXT: # %bb.8:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
; AVX2-NEXT: jmp .LBB47_9
; AVX2-NEXT: .LBB47_7:
@@ -2309,7 +2309,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB47_10
-; AVX2-NEXT: # BB#11:
+; AVX2-NEXT: # %bb.11:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX2-NEXT: vzeroupper
@@ -2326,7 +2326,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512F-NEXT: vmovq %xmm0, %rax
@@ -2343,7 +2343,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; AVX512VL-NEXT: vmovq %xmm0, %rax
@@ -2360,7 +2360,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -2368,7 +2368,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -2378,7 +2378,7 @@ define <4 x float> @uitofp_4i64_to_4f32(
define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
; SSE-LABEL: uitofp_8i32_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pand %xmm2, %xmm3
@@ -2399,7 +2399,7 @@ define <8 x float> @uitofp_8i32_to_8f32(
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i32_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
@@ -2412,7 +2412,7 @@ define <8 x float> @uitofp_8i32_to_8f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i32_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
@@ -2424,26 +2424,26 @@ define <8 x float> @uitofp_8i32_to_8f32(
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_8i32_to_8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_8i32_to_8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_8i32_to_8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_8i32_to_8f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2ps %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <8 x i32> %a to <8 x float>
@@ -2452,7 +2452,7 @@ define <8 x float> @uitofp_8i32_to_8f32(
define <8 x float> @uitofp_8i16_to_8f32(<8 x i16> %a) {
; SSE-LABEL: uitofp_8i16_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
@@ -2463,7 +2463,7 @@ define <8 x float> @uitofp_8i16_to_8f32(
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i16_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -2472,13 +2472,13 @@ define <8 x float> @uitofp_8i16_to_8f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i16_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_8i16_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -2488,7 +2488,7 @@ define <8 x float> @uitofp_8i16_to_8f32(
define <8 x float> @uitofp_8i8_to_8f32(<16 x i8> %a) {
; SSE-LABEL: uitofp_8i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -2500,7 +2500,7 @@ define <8 x float> @uitofp_8i8_to_8f32(<
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_8i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -2509,13 +2509,13 @@ define <8 x float> @uitofp_8i8_to_8f32(<
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_8i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_8i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -2526,7 +2526,7 @@ define <8 x float> @uitofp_8i8_to_8f32(<
define <8 x float> @uitofp_16i8_to_8f32(<16 x i8> %a) {
; SSE-LABEL: uitofp_16i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -2538,7 +2538,7 @@ define <8 x float> @uitofp_16i8_to_8f32(
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_16i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
@@ -2547,13 +2547,13 @@ define <8 x float> @uitofp_16i8_to_8f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_16i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_16i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -2569,7 +2569,7 @@ define <8 x float> @uitofp_16i8_to_8f32(
define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; SSE-LABEL: sitofp_load_2i64_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: cvtsi2sdq %rax, %xmm0
@@ -2581,7 +2581,7 @@ define <2 x double> @sitofp_load_2i64_to
; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_load_2i64_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovdqa (%rdi), %xmm0
; VEX-NEXT: vpextrq $1, %xmm0, %rax
; VEX-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
@@ -2591,7 +2591,7 @@ define <2 x double> @sitofp_load_2i64_to
; VEX-NEXT: retq
;
; AVX512F-LABEL: sitofp_load_2i64_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
@@ -2601,7 +2601,7 @@ define <2 x double> @sitofp_load_2i64_to
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_load_2i64_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
@@ -2611,7 +2611,7 @@ define <2 x double> @sitofp_load_2i64_to
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_load_2i64_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2619,7 +2619,7 @@ define <2 x double> @sitofp_load_2i64_to
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_2i64_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2pd (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <2 x i64>, <2 x i64> *%a
@@ -2629,12 +2629,12 @@ define <2 x double> @sitofp_load_2i64_to
define <2 x double> @sitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; SSE-LABEL: sitofp_load_2i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2pd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_2i32_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd (%rdi), %xmm0
; AVX-NEXT: retq
%ld = load <2 x i32>, <2 x i32> *%a
@@ -2644,7 +2644,7 @@ define <2 x double> @sitofp_load_2i32_to
define <2 x double> @sitofp_load_2i16_to_2f64(<2 x i16> *%a) {
; SSE-LABEL: sitofp_load_2i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; SSE-NEXT: psrad $16, %xmm0
@@ -2652,7 +2652,7 @@ define <2 x double> @sitofp_load_2i16_to
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_2i16_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwq (%rdi), %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
@@ -2664,7 +2664,7 @@ define <2 x double> @sitofp_load_2i16_to
define <2 x double> @sitofp_load_2i8_to_2f64(<2 x i8> *%a) {
; SSE-LABEL: sitofp_load_2i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movzwl (%rdi), %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -2674,7 +2674,7 @@ define <2 x double> @sitofp_load_2i8_to_
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_2i8_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbq (%rdi), %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
@@ -2686,7 +2686,7 @@ define <2 x double> @sitofp_load_2i8_to_
define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; SSE-LABEL: sitofp_load_4i64_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa 16(%rdi), %xmm2
; SSE-NEXT: movq %xmm1, %rax
@@ -2707,7 +2707,7 @@ define <4 x double> @sitofp_load_4i64_to
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_load_4i64_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
@@ -2724,7 +2724,7 @@ define <4 x double> @sitofp_load_4i64_to
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_load_4i64_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
@@ -2741,7 +2741,7 @@ define <4 x double> @sitofp_load_4i64_to
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sitofp_load_4i64_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
@@ -2758,7 +2758,7 @@ define <4 x double> @sitofp_load_4i64_to
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_load_4i64_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
@@ -2775,14 +2775,14 @@ define <4 x double> @sitofp_load_4i64_to
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_load_4i64_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_4i64_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2pd (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i64>, <4 x i64> *%a
@@ -2792,7 +2792,7 @@ define <4 x double> @sitofp_load_4i64_to
define <4 x double> @sitofp_load_4i32_to_4f64(<4 x i32> *%a) {
; SSE-LABEL: sitofp_load_4i32_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: cvtdq2pd %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
@@ -2800,7 +2800,7 @@ define <4 x double> @sitofp_load_4i32_to
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i32_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd (%rdi), %ymm0
; AVX-NEXT: retq
%ld = load <4 x i32>, <4 x i32> *%a
@@ -2810,7 +2810,7 @@ define <4 x double> @sitofp_load_4i32_to
define <4 x double> @sitofp_load_4i16_to_4f64(<4 x i16> *%a) {
; SSE-LABEL: sitofp_load_4i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE-NEXT: psrad $16, %xmm1
@@ -2820,7 +2820,7 @@ define <4 x double> @sitofp_load_4i16_to
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i16_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -2831,7 +2831,7 @@ define <4 x double> @sitofp_load_4i16_to
define <4 x double> @sitofp_load_4i8_to_4f64(<4 x i8> *%a) {
; SSE-LABEL: sitofp_load_4i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -2842,7 +2842,7 @@ define <4 x double> @sitofp_load_4i8_to_
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i8_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -2857,7 +2857,7 @@ define <4 x double> @sitofp_load_4i8_to_
define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; SSE-LABEL: uitofp_load_2i64_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
@@ -2874,7 +2874,7 @@ define <2 x double> @uitofp_load_2i64_to
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_load_2i64_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovapd (%rdi), %xmm0
; VEX-NEXT: vmovapd {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
; VEX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -2887,7 +2887,7 @@ define <2 x double> @uitofp_load_2i64_to
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_2i64_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2sdq %rax, %xmm1, %xmm1
@@ -2897,7 +2897,7 @@ define <2 x double> @uitofp_load_2i64_to
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_2i64_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2sdq %rax, %xmm1, %xmm1
@@ -2907,7 +2907,7 @@ define <2 x double> @uitofp_load_2i64_to
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_2i64_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2915,7 +2915,7 @@ define <2 x double> @uitofp_load_2i64_to
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_2i64_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2pd (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <2 x i64>, <2 x i64> *%a
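
The SSE and VEX sequences above lower unsigned 64-bit lanes to f64 without a
native instruction. The constants 1127219200 (0x43300000) and 1160773632
(0x45300000) are the high words of the doubles 2^52 and 2^84: placing the low
and high 32-bit halves of each lane underneath those exponents converts both
halves with pure bit operations, and a subtract/add removes the bias.
Arithmetically this is equivalent to the following scalar IR sketch (the
function name is illustrative, not from the test file):

define double @u64_to_f64_sketch(i64 %x) {
  %lo  = and i64 %x, 4294967295               ; low 32 bits, exact in double
  %hi  = lshr i64 %x, 32                      ; high 32 bits, exact in double
  %flo = uitofp i64 %lo to double
  %fhi = uitofp i64 %hi to double
  %s   = fmul double %fhi, 0x41F0000000000000 ; scale by 2^32, still exact
  %r   = fadd double %s, %flo                 ; the single rounding step
  ret double %r
}

Because every step except the final fadd is exact, the result rounds exactly
once, matching a direct conversion.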
@@ -2925,7 +2925,7 @@ define <2 x double> @uitofp_load_2i64_to
define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; SSE-LABEL: uitofp_load_2i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
; SSE-NEXT: pand %xmm0, %xmm1
@@ -2937,7 +2937,7 @@ define <2 x double> @uitofp_load_2i32_to
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_load_2i32_to_2f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; VEX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VEX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
@@ -2949,7 +2949,7 @@ define <2 x double> @uitofp_load_2i32_to
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_2i32_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2957,12 +2957,12 @@ define <2 x double> @uitofp_load_2i32_to
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_2i32_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd (%rdi), %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_2i32_to_2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -2970,7 +2970,7 @@ define <2 x double> @uitofp_load_2i32_to
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_2i32_to_2f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <2 x i32>, <2 x i32> *%a
@@ -2980,7 +2980,7 @@ define <2 x double> @uitofp_load_2i32_to
define <2 x double> @uitofp_load_2i16_to_2f64(<2 x i16> *%a) {
; SSE-LABEL: uitofp_load_2i16_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -2988,7 +2988,7 @@ define <2 x double> @uitofp_load_2i16_to
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_2i16_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
@@ -3000,7 +3000,7 @@ define <2 x double> @uitofp_load_2i16_to
define <2 x double> @uitofp_load_2i8_to_2f64(<2 x i8> *%a) {
; SSE-LABEL: uitofp_load_2i8_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movzwl (%rdi), %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm1
@@ -3010,7 +3010,7 @@ define <2 x double> @uitofp_load_2i8_to_
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_2i8_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
@@ -3022,7 +3022,7 @@ define <2 x double> @uitofp_load_2i8_to_
define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; SSE-LABEL: uitofp_load_4i64_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa 16(%rdi), %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [1127219200,1160773632,0,0]
@@ -3050,7 +3050,7 @@ define <4 x double> @uitofp_load_4i64_to
; SSE-NEXT: retq
;
; VEX-LABEL: uitofp_load_4i64_to_4f64:
-; VEX: # BB#0:
+; VEX: # %bb.0:
; VEX-NEXT: vmovapd (%rdi), %ymm0
; VEX-NEXT: vextractf128 $1, %ymm0, %xmm1
; VEX-NEXT: vmovapd {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
@@ -3071,7 +3071,7 @@ define <4 x double> @uitofp_load_4i64_to
; VEX-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_4i64_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
@@ -3088,7 +3088,7 @@ define <4 x double> @uitofp_load_4i64_to
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i64_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
@@ -3105,14 +3105,14 @@ define <4 x double> @uitofp_load_4i64_to
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_4i64_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i64_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2pd (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i64>, <4 x i64> *%a
@@ -3122,7 +3122,7 @@ define <4 x double> @uitofp_load_4i64_to
define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) {
; SSE-LABEL: uitofp_load_4i32_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
@@ -3144,7 +3144,7 @@ define <4 x double> @uitofp_load_4i32_to
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_4i32_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
@@ -3156,7 +3156,7 @@ define <4 x double> @uitofp_load_4i32_to
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_4i32_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vcvtdq2pd %xmm1, %ymm1
@@ -3169,26 +3169,26 @@ define <4 x double> @uitofp_load_4i32_to
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_4i32_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i32_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd (%rdi), %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_4i32_to_4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i32_to_4f64:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i32>, <4 x i32> *%a
@@ -3198,7 +3198,7 @@ define <4 x double> @uitofp_load_4i32_to
define <4 x double> @uitofp_load_4i16_to_4f64(<4 x i16> *%a) {
; SSE-LABEL: uitofp_load_4i16_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -3208,7 +3208,7 @@ define <4 x double> @uitofp_load_4i16_to
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_4i16_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -3219,7 +3219,7 @@ define <4 x double> @uitofp_load_4i16_to
define <4 x double> @uitofp_load_4i8_to_4f64(<4 x i8> *%a) {
; SSE-LABEL: uitofp_load_4i8_to_4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -3230,7 +3230,7 @@ define <4 x double> @uitofp_load_4i8_to_
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_4i8_to_4f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX-NEXT: retq
@@ -3245,7 +3245,7 @@ define <4 x double> @uitofp_load_4i8_to_
define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; SSE-LABEL: sitofp_load_4i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movq %xmm0, %rax
@@ -3267,7 +3267,7 @@ define <4 x float> @sitofp_load_4i64_to_
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_load_4i64_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
@@ -3285,7 +3285,7 @@ define <4 x float> @sitofp_load_4i64_to_
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_load_4i64_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
@@ -3303,7 +3303,7 @@ define <4 x float> @sitofp_load_4i64_to_
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sitofp_load_4i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
@@ -3321,7 +3321,7 @@ define <4 x float> @sitofp_load_4i64_to_
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_load_4i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
@@ -3339,7 +3339,7 @@ define <4 x float> @sitofp_load_4i64_to_
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_load_4i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3347,7 +3347,7 @@ define <4 x float> @sitofp_load_4i64_to_
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_4i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2psy (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i64>, <4 x i64> *%a
@@ -3357,12 +3357,12 @@ define <4 x float> @sitofp_load_4i64_to_
define <4 x float> @sitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; SSE-LABEL: sitofp_load_4i32_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2ps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i32_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2ps (%rdi), %xmm0
; AVX-NEXT: retq
%ld = load <4 x i32>, <4 x i32> *%a
@@ -3372,7 +3372,7 @@ define <4 x float> @sitofp_load_4i32_to_
define <4 x float> @sitofp_load_4i16_to_4f32(<4 x i16> *%a) {
; SSE-LABEL: sitofp_load_4i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
@@ -3380,7 +3380,7 @@ define <4 x float> @sitofp_load_4i16_to_
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i16_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -3391,7 +3391,7 @@ define <4 x float> @sitofp_load_4i16_to_
define <4 x float> @sitofp_load_4i8_to_4f32(<4 x i8> *%a) {
; SSE-LABEL: sitofp_load_4i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -3400,7 +3400,7 @@ define <4 x float> @sitofp_load_4i8_to_4
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_4i8_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -3411,7 +3411,7 @@ define <4 x float> @sitofp_load_4i8_to_4
define <8 x float> @sitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-LABEL: sitofp_load_8i64_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movdqa 32(%rdi), %xmm2
@@ -3452,7 +3452,7 @@ define <8 x float> @sitofp_load_8i64_to_
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_load_8i64_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
@@ -3483,7 +3483,7 @@ define <8 x float> @sitofp_load_8i64_to_
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_load_8i64_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
@@ -3514,7 +3514,7 @@ define <8 x float> @sitofp_load_8i64_to_
; AVX2-NEXT: retq
;
; AVX512F-LABEL: sitofp_load_8i64_to_8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
@@ -3545,7 +3545,7 @@ define <8 x float> @sitofp_load_8i64_to_
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: sitofp_load_8i64_to_8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512VL-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
@@ -3576,12 +3576,12 @@ define <8 x float> @sitofp_load_8i64_to_
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_load_8i64_to_8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtqq2ps (%rdi), %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_8i64_to_8f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtqq2ps (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <8 x i64>, <8 x i64> *%a
@@ -3591,13 +3591,13 @@ define <8 x float> @sitofp_load_8i64_to_
define <8 x float> @sitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; SSE-LABEL: sitofp_load_8i32_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtdq2ps (%rdi), %xmm0
; SSE-NEXT: cvtdq2ps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_load_8i32_to_8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2ps (%rdi), %ymm0
; AVX-NEXT: retq
%ld = load <8 x i32>, <8 x i32> *%a
@@ -3607,7 +3607,7 @@ define <8 x float> @sitofp_load_8i32_to_
define <8 x float> @sitofp_load_8i16_to_8f32(<8 x i16> *%a) {
; SSE-LABEL: sitofp_load_8i16_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
@@ -3619,7 +3619,7 @@ define <8 x float> @sitofp_load_8i16_to_
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_load_8i16_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -3627,13 +3627,13 @@ define <8 x float> @sitofp_load_8i16_to_
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_load_8i16_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd (%rdi), %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_load_8i16_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd (%rdi), %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -3644,7 +3644,7 @@ define <8 x float> @sitofp_load_8i16_to_
define <8 x float> @sitofp_load_8i8_to_8f32(<8 x i8> *%a) {
; SSE-LABEL: sitofp_load_8i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -3658,7 +3658,7 @@ define <8 x float> @sitofp_load_8i8_to_8
; SSE-NEXT: retq
;
; AVX1-LABEL: sitofp_load_8i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -3668,13 +3668,13 @@ define <8 x float> @sitofp_load_8i8_to_8
; AVX1-NEXT: retq
;
; AVX2-LABEL: sitofp_load_8i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd (%rdi), %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: sitofp_load_8i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd (%rdi), %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -3689,13 +3689,13 @@ define <8 x float> @sitofp_load_8i8_to_8
define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; SSE-LABEL: uitofp_load_4i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm2
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB76_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: jmp .LBB76_3
; SSE-NEXT: .LBB76_1:
@@ -3710,7 +3710,7 @@ define <4 x float> @uitofp_load_4i64_to_
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB76_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: cvtsi2ssq %rax, %xmm3
; SSE-NEXT: jmp .LBB76_6
; SSE-NEXT: .LBB76_4:
@@ -3724,7 +3724,7 @@ define <4 x float> @uitofp_load_4i64_to_
; SSE-NEXT: movq %xmm2, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB76_7
-; SSE-NEXT: # BB#8:
+; SSE-NEXT: # %bb.8:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB76_9
@@ -3742,7 +3742,7 @@ define <4 x float> @uitofp_load_4i64_to_
; SSE-NEXT: movq %xmm2, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB76_10
-; SSE-NEXT: # BB#11:
+; SSE-NEXT: # %bb.11:
; SSE-NEXT: xorps %xmm2, %xmm2
; SSE-NEXT: cvtsi2ssq %rax, %xmm2
; SSE-NEXT: jmp .LBB76_12
@@ -3760,12 +3760,12 @@ define <4 x float> @uitofp_load_4i64_to_
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_4i64_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB76_1
-; AVX1-NEXT: # BB#2:
+; AVX1-NEXT: # %bb.2:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX1-NEXT: jmp .LBB76_3
; AVX1-NEXT: .LBB76_1:
@@ -3779,7 +3779,7 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB76_4
-; AVX1-NEXT: # BB#5:
+; AVX1-NEXT: # %bb.5:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
; AVX1-NEXT: jmp .LBB76_6
; AVX1-NEXT: .LBB76_4:
@@ -3795,7 +3795,7 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB76_7
-; AVX1-NEXT: # BB#8:
+; AVX1-NEXT: # %bb.8:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
; AVX1-NEXT: jmp .LBB76_9
; AVX1-NEXT: .LBB76_7:
@@ -3810,7 +3810,7 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB76_10
-; AVX1-NEXT: # BB#11:
+; AVX1-NEXT: # %bb.11:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vzeroupper
@@ -3827,12 +3827,12 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_4i64_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB76_1
-; AVX2-NEXT: # BB#2:
+; AVX2-NEXT: # %bb.2:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX2-NEXT: jmp .LBB76_3
; AVX2-NEXT: .LBB76_1:
@@ -3846,7 +3846,7 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB76_4
-; AVX2-NEXT: # BB#5:
+; AVX2-NEXT: # %bb.5:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
; AVX2-NEXT: jmp .LBB76_6
; AVX2-NEXT: .LBB76_4:
@@ -3862,7 +3862,7 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB76_7
-; AVX2-NEXT: # BB#8:
+; AVX2-NEXT: # %bb.8:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm2
; AVX2-NEXT: jmp .LBB76_9
; AVX2-NEXT: .LBB76_7:
@@ -3877,7 +3877,7 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB76_10
-; AVX2-NEXT: # BB#11:
+; AVX2-NEXT: # %bb.11:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm0
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX2-NEXT: vzeroupper
@@ -3894,7 +3894,7 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_4i64_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
@@ -3912,7 +3912,7 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i64_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
@@ -3930,7 +3930,7 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_4i64_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -3938,7 +3938,7 @@ define <4 x float> @uitofp_load_4i64_to_
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i64_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2psy (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i64>, <4 x i64> *%a
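
With neither AVX512DQ nor VLDQ there is no unsigned i64 to f32 instruction,
so each lane above tests the sign bit and branches: the "# %bb.N"
fall-through paths handle values that fit in a signed i64 with one
cvtsi2ssq, while the .LBB76_* paths halve the value with the low bit OR-ed
back in (so the dropped bit still affects rounding) and then double the
converted half. A hand-expanded, per-lane IR sketch of that shape
(illustrative only):

define float @u64_to_f32_sketch(i64 %x) {
  %neg = icmp slt i64 %x, 0
  br i1 %neg, label %big, label %small

small:                                  ; top bit clear: convert directly
  %f = sitofp i64 %x to float
  ret float %f

big:                                    ; top bit set: halve, convert, double
  %h    = lshr i64 %x, 1
  %b    = and i64 %x, 1
  %half = or i64 %h, %b                 ; keep the shifted-out bit sticky
  %hf   = sitofp i64 %half to float
  %r    = fadd float %hf, %hf
  ret float %r
}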
@@ -3948,7 +3948,7 @@ define <4 x float> @uitofp_load_4i64_to_
define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; SSE-LABEL: uitofp_load_4i32_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-NEXT: pand %xmm0, %xmm1
@@ -3960,7 +3960,7 @@ define <4 x float> @uitofp_load_4i32_to_
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_4i32_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -3970,7 +3970,7 @@ define <4 x float> @uitofp_load_4i32_to_
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_4i32_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
@@ -3983,7 +3983,7 @@ define <4 x float> @uitofp_load_4i32_to_
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_4i32_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -3991,12 +3991,12 @@ define <4 x float> @uitofp_load_4i32_to_
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i32_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps (%rdi), %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_4i32_to_4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -4004,7 +4004,7 @@ define <4 x float> @uitofp_load_4i32_to_
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i32_to_4f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2ps (%rdi), %xmm0
; AVX512VLDQ-NEXT: retq
%ld = load <4 x i32>, <4 x i32> *%a
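
All three non-512 variants above split each unsigned 32-bit lane into 16-bit
halves: the blend against 1258291200 (0x4B000000, the bit pattern of the
float 2^23) converts the low half with a pure bitwise operation, psrld $16
isolates the high half, and one add recombines them. The arithmetic being
performed, as an IR sketch (names illustrative):

define <4 x float> @u32_to_f32_sketch(<4 x i32> %x) {
  %lo  = and <4 x i32> %x, <i32 65535, i32 65535, i32 65535, i32 65535>
  %hi  = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %flo = uitofp <4 x i32> %lo to <4 x float>      ; exact, values < 2^16
  %fhi = uitofp <4 x i32> %hi to <4 x float>      ; exact
  %s   = fmul <4 x float> %fhi, <float 65536.0, float 65536.0, float 65536.0, float 65536.0>
  %r   = fadd <4 x float> %s, %flo                ; the single rounding step
  ret <4 x float> %r
}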
@@ -4014,7 +4014,7 @@ define <4 x float> @uitofp_load_4i32_to_
define <4 x float> @uitofp_load_4i16_to_4f32(<4 x i16> *%a) {
; SSE-LABEL: uitofp_load_4i16_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -4022,7 +4022,7 @@ define <4 x float> @uitofp_load_4i16_to_
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_4i16_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -4033,7 +4033,7 @@ define <4 x float> @uitofp_load_4i16_to_
define <4 x float> @uitofp_load_4i8_to_4f32(<4 x i8> *%a) {
; SSE-LABEL: uitofp_load_4i8_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -4042,7 +4042,7 @@ define <4 x float> @uitofp_load_4i8_to_4
; SSE-NEXT: retq
;
; AVX-LABEL: uitofp_load_4i8_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: retq
@@ -4053,7 +4053,7 @@ define <4 x float> @uitofp_load_4i8_to_4
define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; SSE-LABEL: uitofp_load_8i64_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm5
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movdqa 32(%rdi), %xmm2
@@ -4061,7 +4061,7 @@ define <8 x float> @uitofp_load_8i64_to_
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_1
-; SSE-NEXT: # BB#2:
+; SSE-NEXT: # %bb.2:
; SSE-NEXT: cvtsi2ssq %rax, %xmm3
; SSE-NEXT: jmp .LBB80_3
; SSE-NEXT: .LBB80_1:
@@ -4076,7 +4076,7 @@ define <8 x float> @uitofp_load_8i64_to_
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_4
-; SSE-NEXT: # BB#5:
+; SSE-NEXT: # %bb.5:
; SSE-NEXT: cvtsi2ssq %rax, %xmm4
; SSE-NEXT: jmp .LBB80_6
; SSE-NEXT: .LBB80_4:
@@ -4090,7 +4090,7 @@ define <8 x float> @uitofp_load_8i64_to_
; SSE-NEXT: movq %xmm5, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_7
-; SSE-NEXT: # BB#8:
+; SSE-NEXT: # %bb.8:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ssq %rax, %xmm0
; SSE-NEXT: jmp .LBB80_9
@@ -4107,7 +4107,7 @@ define <8 x float> @uitofp_load_8i64_to_
; SSE-NEXT: movq %xmm5, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_10
-; SSE-NEXT: # BB#11:
+; SSE-NEXT: # %bb.11:
; SSE-NEXT: cvtsi2ssq %rax, %xmm6
; SSE-NEXT: jmp .LBB80_12
; SSE-NEXT: .LBB80_10:
@@ -4121,7 +4121,7 @@ define <8 x float> @uitofp_load_8i64_to_
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_13
-; SSE-NEXT: # BB#14:
+; SSE-NEXT: # %bb.14:
; SSE-NEXT: xorps %xmm5, %xmm5
; SSE-NEXT: cvtsi2ssq %rax, %xmm5
; SSE-NEXT: jmp .LBB80_15
@@ -4138,7 +4138,7 @@ define <8 x float> @uitofp_load_8i64_to_
; SSE-NEXT: movq %xmm1, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_16
-; SSE-NEXT: # BB#17:
+; SSE-NEXT: # %bb.17:
; SSE-NEXT: cvtsi2ssq %rax, %xmm7
; SSE-NEXT: jmp .LBB80_18
; SSE-NEXT: .LBB80_16:
@@ -4154,7 +4154,7 @@ define <8 x float> @uitofp_load_8i64_to_
; SSE-NEXT: movq %xmm2, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_19
-; SSE-NEXT: # BB#20:
+; SSE-NEXT: # %bb.20:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsi2ssq %rax, %xmm1
; SSE-NEXT: jmp .LBB80_21
@@ -4173,7 +4173,7 @@ define <8 x float> @uitofp_load_8i64_to_
; SSE-NEXT: movq %xmm2, %rax
; SSE-NEXT: testq %rax, %rax
; SSE-NEXT: js .LBB80_22
-; SSE-NEXT: # BB#23:
+; SSE-NEXT: # %bb.23:
; SSE-NEXT: xorps %xmm2, %xmm2
; SSE-NEXT: cvtsi2ssq %rax, %xmm2
; SSE-NEXT: jmp .LBB80_24
@@ -4191,13 +4191,13 @@ define <8 x float> @uitofp_load_8i64_to_
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_8i64_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_1
-; AVX1-NEXT: # BB#2:
+; AVX1-NEXT: # %bb.2:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX1-NEXT: jmp .LBB80_3
; AVX1-NEXT: .LBB80_1:
@@ -4211,7 +4211,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX1-NEXT: vmovq %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_4
-; AVX1-NEXT: # BB#5:
+; AVX1-NEXT: # %bb.5:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
; AVX1-NEXT: jmp .LBB80_6
; AVX1-NEXT: .LBB80_4:
@@ -4226,7 +4226,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX1-NEXT: vmovq %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_7
-; AVX1-NEXT: # BB#8:
+; AVX1-NEXT: # %bb.8:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
; AVX1-NEXT: jmp .LBB80_9
; AVX1-NEXT: .LBB80_7:
@@ -4240,7 +4240,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_10
-; AVX1-NEXT: # BB#11:
+; AVX1-NEXT: # %bb.11:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm2
; AVX1-NEXT: jmp .LBB80_12
; AVX1-NEXT: .LBB80_10:
@@ -4254,7 +4254,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_13
-; AVX1-NEXT: # BB#14:
+; AVX1-NEXT: # %bb.14:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5
; AVX1-NEXT: jmp .LBB80_15
; AVX1-NEXT: .LBB80_13:
@@ -4269,7 +4269,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_16
-; AVX1-NEXT: # BB#17:
+; AVX1-NEXT: # %bb.17:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
; AVX1-NEXT: jmp .LBB80_18
; AVX1-NEXT: .LBB80_16:
@@ -4286,7 +4286,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX1-NEXT: vmovq %xmm4, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_19
-; AVX1-NEXT: # BB#20:
+; AVX1-NEXT: # %bb.20:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5
; AVX1-NEXT: jmp .LBB80_21
; AVX1-NEXT: .LBB80_19:
@@ -4302,7 +4302,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX1-NEXT: vpextrq $1, %xmm4, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_22
-; AVX1-NEXT: # BB#23:
+; AVX1-NEXT: # %bb.23:
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
; AVX1-NEXT: jmp .LBB80_24
; AVX1-NEXT: .LBB80_22:
@@ -4318,13 +4318,13 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_8i64_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX2-NEXT: vpextrq $1, %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_1
-; AVX2-NEXT: # BB#2:
+; AVX2-NEXT: # %bb.2:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; AVX2-NEXT: jmp .LBB80_3
; AVX2-NEXT: .LBB80_1:
@@ -4338,7 +4338,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX2-NEXT: vmovq %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_4
-; AVX2-NEXT: # BB#5:
+; AVX2-NEXT: # %bb.5:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
; AVX2-NEXT: jmp .LBB80_6
; AVX2-NEXT: .LBB80_4:
@@ -4353,7 +4353,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX2-NEXT: vmovq %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_7
-; AVX2-NEXT: # BB#8:
+; AVX2-NEXT: # %bb.8:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
; AVX2-NEXT: jmp .LBB80_9
; AVX2-NEXT: .LBB80_7:
@@ -4367,7 +4367,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX2-NEXT: vpextrq $1, %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_10
-; AVX2-NEXT: # BB#11:
+; AVX2-NEXT: # %bb.11:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm2
; AVX2-NEXT: jmp .LBB80_12
; AVX2-NEXT: .LBB80_10:
@@ -4381,7 +4381,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_13
-; AVX2-NEXT: # BB#14:
+; AVX2-NEXT: # %bb.14:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5
; AVX2-NEXT: jmp .LBB80_15
; AVX2-NEXT: .LBB80_13:
@@ -4396,7 +4396,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_16
-; AVX2-NEXT: # BB#17:
+; AVX2-NEXT: # %bb.17:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
; AVX2-NEXT: jmp .LBB80_18
; AVX2-NEXT: .LBB80_16:
@@ -4413,7 +4413,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX2-NEXT: vmovq %xmm4, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_19
-; AVX2-NEXT: # BB#20:
+; AVX2-NEXT: # %bb.20:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5
; AVX2-NEXT: jmp .LBB80_21
; AVX2-NEXT: .LBB80_19:
@@ -4429,7 +4429,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX2-NEXT: vpextrq $1, %xmm4, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_22
-; AVX2-NEXT: # BB#23:
+; AVX2-NEXT: # %bb.23:
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
; AVX2-NEXT: jmp .LBB80_24
; AVX2-NEXT: .LBB80_22:
@@ -4445,7 +4445,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_8i64_to_8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
@@ -4476,7 +4476,7 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_8i64_to_8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512VL-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; AVX512VL-NEXT: vpextrq $1, %xmm1, %rax
@@ -4507,12 +4507,12 @@ define <8 x float> @uitofp_load_8i64_to_
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_8i64_to_8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvtuqq2ps (%rdi), %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_8i64_to_8f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtuqq2ps (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <8 x i64>, <8 x i64> *%a
@@ -4522,7 +4522,7 @@ define <8 x float> @uitofp_load_8i64_to_
define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; SSE-LABEL: uitofp_load_8i32_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa 16(%rdi), %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
@@ -4545,7 +4545,7 @@ define <8 x float> @uitofp_load_8i32_to_
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_8i32_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -4559,7 +4559,7 @@ define <8 x float> @uitofp_load_8i32_to_
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_8i32_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
@@ -4572,26 +4572,26 @@ define <8 x float> @uitofp_load_8i32_to_
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_load_8i32_to_8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %ymm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_8i32_to_8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps (%rdi), %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_load_8i32_to_8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_8i32_to_8f32:
-; AVX512VLDQ: # BB#0:
+; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2ps (%rdi), %ymm0
; AVX512VLDQ-NEXT: retq
%ld = load <8 x i32>, <8 x i32> *%a
@@ -4601,7 +4601,7 @@ define <8 x float> @uitofp_load_8i32_to_
define <8 x float> @uitofp_load_8i16_to_8f32(<8 x i16> *%a) {
; SSE-LABEL: uitofp_load_8i16_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm0
@@ -4612,7 +4612,7 @@ define <8 x float> @uitofp_load_8i16_to_
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_8i16_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -4620,13 +4620,13 @@ define <8 x float> @uitofp_load_8i16_to_
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_8i16_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_load_8i16_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -4637,7 +4637,7 @@ define <8 x float> @uitofp_load_8i16_to_
define <8 x float> @uitofp_load_8i8_to_8f32(<8 x i8> *%a) {
; SSE-LABEL: uitofp_load_8i8_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -4649,7 +4649,7 @@ define <8 x float> @uitofp_load_8i8_to_8
; SSE-NEXT: retq
;
; AVX1-LABEL: uitofp_load_8i8_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -4657,13 +4657,13 @@ define <8 x float> @uitofp_load_8i8_to_8
; AVX1-NEXT: retq
;
; AVX2-LABEL: uitofp_load_8i8_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: uitofp_load_8i8_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -4679,7 +4679,7 @@ define <8 x float> @uitofp_load_8i8_to_8
%Arguments = type <{ <8 x i8>, <8 x i16>, <8 x float>* }>
define void @aggregate_sitofp_8i16_to_8f32(%Arguments* nocapture readonly %a0) {
; SSE-LABEL: aggregate_sitofp_8i16_to_8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movq 24(%rdi), %rax
; SSE-NEXT: movdqu 8(%rdi), %xmm0
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -4693,7 +4693,7 @@ define void @aggregate_sitofp_8i16_to_8f
; SSE-NEXT: retq
;
; AVX1-LABEL: aggregate_sitofp_8i16_to_8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movq 24(%rdi), %rax
; AVX1-NEXT: vmovdqu 8(%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
@@ -4706,7 +4706,7 @@ define void @aggregate_sitofp_8i16_to_8f
; AVX1-NEXT: retq
;
; AVX2-LABEL: aggregate_sitofp_8i16_to_8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movq 24(%rdi), %rax
; AVX2-NEXT: vpmovsxwd 8(%rdi), %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
@@ -4715,7 +4715,7 @@ define void @aggregate_sitofp_8i16_to_8f
; AVX2-NEXT: retq
;
; AVX512-LABEL: aggregate_sitofp_8i16_to_8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: movq 24(%rdi), %rax
; AVX512-NEXT: vpmovsxwd 8(%rdi), %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
@@ -4732,12 +4732,12 @@ define void @aggregate_sitofp_8i16_to_8f
define <2 x double> @sitofp_i32_to_2f64(<2 x double> %a0, i32 %a1) nounwind {
; SSE-LABEL: sitofp_i32_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtsi2sdl %edi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_i32_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = sitofp i32 %a1 to double
@@ -4747,12 +4747,12 @@ define <2 x double> @sitofp_i32_to_2f64(
define <4 x float> @sitofp_i32_to_4f32(<4 x float> %a0, i32 %a1) nounwind {
; SSE-LABEL: sitofp_i32_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtsi2ssl %edi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_i32_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = sitofp i32 %a1 to float
@@ -4762,12 +4762,12 @@ define <4 x float> @sitofp_i32_to_4f32(<
define <2 x double> @sitofp_i64_to_2f64(<2 x double> %a0, i64 %a1) nounwind {
; SSE-LABEL: sitofp_i64_to_2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtsi2sdq %rdi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_i64_to_2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = sitofp i64 %a1 to double
@@ -4777,12 +4777,12 @@ define <2 x double> @sitofp_i64_to_2f64(
define <4 x float> @sitofp_i64_to_4f32(<4 x float> %a0, i64 %a1) nounwind {
; SSE-LABEL: sitofp_i64_to_4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cvtsi2ssq %rdi, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sitofp_i64_to_4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0
; AVX-NEXT: retq
%cvt = sitofp i64 %a1 to float
Modified: llvm/trunk/test/CodeGen/X86/vec_loadsingles.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_loadsingles.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_loadsingles.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_loadsingles.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define <4 x float> @merge_2_floats(float* nocapture %p) nounwind readonly {
; ALL-LABEL: merge_2_floats:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
%tmp1 = load float, float* %p
@@ -19,7 +19,7 @@ define <4 x float> @merge_2_floats(float
; two i64s of a <4 x i64> as a load of two i32s.
define <4 x i64> @merge_2_floats_into_4() {
; ALL-LABEL: merge_2_floats_into_4:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movq (%rax), %rax
; ALL-NEXT: vmovups (%rax), %xmm0
; ALL-NEXT: retq
@@ -37,7 +37,7 @@ define <4 x i64> @merge_2_floats_into_4(
define <4 x float> @merge_4_floats(float* %ptr) {
; ALL-LABEL: merge_4_floats:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovups (%rdi), %xmm0
; ALL-NEXT: retq
%a = load float, float* %ptr, align 8
@@ -61,12 +61,12 @@ define <4 x float> @merge_4_floats(float
define <8 x float> @merge_8_floats(float* %ptr) {
; FAST32-LABEL: merge_8_floats:
-; FAST32: # BB#0:
+; FAST32: # %bb.0:
; FAST32-NEXT: vmovups (%rdi), %ymm0
; FAST32-NEXT: retq
;
; SLOW32-LABEL: merge_8_floats:
-; SLOW32: # BB#0:
+; SLOW32: # %bb.0:
; SLOW32-NEXT: vmovups (%rdi), %xmm0
; SLOW32-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; SLOW32-NEXT: retq
@@ -98,12 +98,12 @@ define <8 x float> @merge_8_floats(float
define <4 x double> @merge_4_doubles(double* %ptr) {
; FAST32-LABEL: merge_4_doubles:
-; FAST32: # BB#0:
+; FAST32: # %bb.0:
; FAST32-NEXT: vmovups (%rdi), %ymm0
; FAST32-NEXT: retq
;
; SLOW32-LABEL: merge_4_doubles:
-; SLOW32: # BB#0:
+; SLOW32: # %bb.0:
; SLOW32-NEXT: vmovups (%rdi), %xmm0
; SLOW32-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
; SLOW32-NEXT: retq
@@ -126,12 +126,12 @@ define <4 x double> @merge_4_doubles(dou
; first of the combined loads is offset from the base address.
define <4 x double> @merge_4_doubles_offset(double* %ptr) {
; FAST32-LABEL: merge_4_doubles_offset:
-; FAST32: # BB#0:
+; FAST32: # %bb.0:
; FAST32-NEXT: vmovups 32(%rdi), %ymm0
; FAST32-NEXT: retq
;
; SLOW32-LABEL: merge_4_doubles_offset:
-; SLOW32: # BB#0:
+; SLOW32: # %bb.0:
; SLOW32-NEXT: vmovups 32(%rdi), %xmm0
; SLOW32-NEXT: vinsertf128 $1, 48(%rdi), %ymm0, %ymm0
; SLOW32-NEXT: retq
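
This file pins down consecutive-load merging: scalar loads from adjacent
addresses that feed consecutive vector lanes are fused into a single wide
load. The FAST32/SLOW32 prefixes cover subtargets where an unaligned 32-byte
access is cheap (one ymm vmovups) versus slow (a 16-byte vmovups plus
vinsertf128). The IR shape being matched is roughly this sketch
(illustrative, not one of the tests above):

define <4 x float> @merge_sketch(float* %p) {
  %p1 = getelementptr inbounds float, float* %p, i64 1
  %a  = load float, float* %p
  %b  = load float, float* %p1
  %v0 = insertelement <4 x float> undef, float %a, i32 0
  %v1 = insertelement <4 x float> %v0, float %b, i32 1
  ret <4 x float> %v1                   ; folds to one 8-byte load (vmovsd)
}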
Modified: llvm/trunk/test/CodeGen/X86/vec_logical.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_logical.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_logical.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_logical.ll Mon Dec 4 09:18:51 2017
@@ -4,13 +4,13 @@
define void @t(<4 x float> %A) {
; SSE-LABEL: t:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
; SSE-NEXT: movaps %xmm0, 0
; SSE-NEXT: retl
;
; AVX-LABEL: t:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, 0
; AVX-NEXT: retl
@@ -21,12 +21,12 @@ define void @t(<4 x float> %A) {
define <4 x float> @t1(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: t1:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: xorps %xmm1, %xmm0
; SSE-NEXT: retl
;
; AVX-LABEL: t1:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
entry:
@@ -39,12 +39,12 @@ entry:
define <2 x double> @t2(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: t2:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: andps %xmm1, %xmm0
; SSE-NEXT: retl
;
; AVX-LABEL: t2:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
entry:
@@ -57,7 +57,7 @@ entry:
define void @t3(<4 x float> %a, <4 x float> %b, <4 x float>* %c, <4 x float>* %d) {
; SSE-LABEL: t3:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-NEXT: andnps %xmm1, %xmm0
@@ -66,7 +66,7 @@ define void @t3(<4 x float> %a, <4 x flo
; SSE-NEXT: retl
;
; AVX-LABEL: t3:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX-NEXT: vandnps %xmm1, %xmm0, %xmm0
@@ -88,13 +88,13 @@ entry:
define <2 x i64> @andn_double_xor(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
; SSE-LABEL: andn_double_xor:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm2, %xmm1
; SSE-NEXT: andnps %xmm1, %xmm0
; SSE-NEXT: retl
;
; AVX-LABEL: andn_double_xor:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vandnps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retl
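
These tests keep bitwise ops on float vectors on the *ps instruction forms
and check that a NOT feeding an AND folds into a single andnps
(andn_double_xor above additionally checks that the fold survives an
intervening xor). A minimal IR sketch of the and-not shape (illustrative):

define <4 x float> @andn_sketch(<4 x float> %a, <4 x float> %b) {
  %ai  = bitcast <4 x float> %a to <4 x i32>
  %bi  = bitcast <4 x float> %b to <4 x i32>
  %not = xor <4 x i32> %ai, <i32 -1, i32 -1, i32 -1, i32 -1>
  %and = and <4 x i32> %not, %bi        ; computes ~a & b
  %r   = bitcast <4 x i32> %and to <4 x float>
  ret <4 x float> %r                    ; selects to: andnps %xmm1, %xmm0
}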
Modified: llvm/trunk/test/CodeGen/X86/vec_minmax_match.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_minmax_match.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_minmax_match.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_minmax_match.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define <4 x i32> @smin_vec1(<4 x i32> %x) {
; CHECK-LABEL: smin_vec1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
@@ -19,7 +19,7 @@ define <4 x i32> @smin_vec1(<4 x i32> %x
define <4 x i32> @smin_vec2(<4 x i32> %x) {
; CHECK-LABEL: smin_vec2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
@@ -34,7 +34,7 @@ define <4 x i32> @smin_vec2(<4 x i32> %x
; (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
define <4 x i32> @smin_vec3(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: smin_vec3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
@@ -49,7 +49,7 @@ define <4 x i32> @smin_vec3(<4 x i32> %x
; (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
define <4 x i32> @smin_vec4(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: smin_vec4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpminsd %xmm1, %xmm0, %xmm0
@@ -62,7 +62,7 @@ define <4 x i32> @smin_vec4(<4 x i32> %x
define <4 x i32> @smax_vec1(<4 x i32> %x) {
; CHECK-LABEL: smax_vec1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
@@ -75,7 +75,7 @@ define <4 x i32> @smax_vec1(<4 x i32> %x
define <4 x i32> @smax_vec2(<4 x i32> %x) {
; CHECK-LABEL: smax_vec2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
@@ -90,7 +90,7 @@ define <4 x i32> @smax_vec2(<4 x i32> %x
; (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
define <4 x i32> @smax_vec3(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: smax_vec3:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
@@ -105,7 +105,7 @@ define <4 x i32> @smax_vec3(<4 x i32> %x
; (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
define <4 x i32> @smax_vec4(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: smax_vec4:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
@@ -118,7 +118,7 @@ define <4 x i32> @smax_vec4(<4 x i32> %x
define <4 x i32> @umax_vec1(<4 x i32> %x) {
; CHECK-LABEL: umax_vec1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp = icmp slt <4 x i32> %x, zeroinitializer
@@ -128,7 +128,7 @@ define <4 x i32> @umax_vec1(<4 x i32> %x
define <4 x i32> @umax_vec2(<4 x i32> %x) {
; CHECK-LABEL: umax_vec2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -138,7 +138,7 @@ define <4 x i32> @umax_vec2(<4 x i32> %x
define <4 x i32> @umin_vec1(<4 x i32> %x) {
; CHECK-LABEL: umin_vec1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp = icmp slt <4 x i32> %x, zeroinitializer
@@ -148,7 +148,7 @@ define <4 x i32> @umin_vec1(<4 x i32> %x
define <4 x i32> @umin_vec2(<4 x i32> %x) {
; CHECK-LABEL: umin_vec2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
%cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -163,7 +163,7 @@ define <4 x i32> @umin_vec2(<4 x i32> %x
define <4 x i32> @clamp_signed1(<4 x i32> %x) {
; CHECK-LABEL: clamp_signed1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -178,7 +178,7 @@ define <4 x i32> @clamp_signed1(<4 x i32
define <4 x i32> @clamp_signed2(<4 x i32> %x) {
; CHECK-LABEL: clamp_signed2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -193,7 +193,7 @@ define <4 x i32> @clamp_signed2(<4 x i32
define <4 x i32> @clamp_unsigned1(<4 x i32> %x) {
; CHECK-LABEL: clamp_unsigned1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -208,7 +208,7 @@ define <4 x i32> @clamp_unsigned1(<4 x i
define <4 x i32> @clamp_unsigned2(<4 x i32> %x) {
; CHECK-LABEL: clamp_unsigned2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -221,7 +221,7 @@ define <4 x i32> @clamp_unsigned2(<4 x i
define <4 x i32> @wrong_pred_for_smin_with_not(<4 x i32> %x) {
; CHECK-LABEL: wrong_pred_for_smin_with_not:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpxor %xmm1, %xmm0, %xmm1
; CHECK-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -237,7 +237,7 @@ define <4 x i32> @wrong_pred_for_smin_wi
define <4 x i32> @wrong_pred_for_smin_with_subnsw(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: wrong_pred_for_smin_with_subnsw:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm2
; CHECK-NEXT: vpminud %xmm1, %xmm0, %xmm1
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
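
(The comments in vec_minmax_match.ll above, e.g. "(X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)", describe the select canonicalization these tests exercise. A minimal LLVM IR reduction of the smin_vec3 case, reconstructed from the test and not part of this patch, would be the sketch below; it lowers to the vpsubd/vpxor/vpminsd sequence in the CHECK lines.)

  define <4 x i32> @smin_vec3_sketch(<4 x i32> %x, <4 x i32> %y) {
    ; sketch only: Z = X - Y with nsw, so X >s Y iff Z >s 0
    %z = sub nsw <4 x i32> %x, %y
    %cmp = icmp sgt <4 x i32> %x, %y
    ; (X >s Y) ? 0 : Z is matched as SMIN(Z, 0), i.e. vpminsd against zero
    %r = select <4 x i1> %cmp, <4 x i32> zeroinitializer, <4 x i32> %z
    ret <4 x i32> %r
  }
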
Modified: llvm/trunk/test/CodeGen/X86/vec_minmax_sint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_minmax_sint.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_minmax_sint.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_minmax_sint.ll Mon Dec 4 09:18:51 2017
@@ -13,7 +13,7 @@
define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: max_gt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -32,7 +32,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm1, %xmm3
@@ -51,7 +51,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
@@ -59,19 +59,19 @@ define <2 x i64> @max_gt_v2i64(<2 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
@@ -85,7 +85,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64>
define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: max_gt_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -119,7 +119,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm3, %xmm5
@@ -153,7 +153,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa %xmm1, %xmm5
; SSE42-NEXT: pcmpgtq %xmm3, %xmm5
@@ -166,7 +166,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -176,13 +176,13 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
@@ -195,7 +195,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
define <4 x i32> @max_gt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: max_gt_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -205,17 +205,17 @@ define <4 x i32> @max_gt_v4i32(<4 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_gt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <4 x i32> %a, %b
@@ -225,7 +225,7 @@ define <4 x i32> @max_gt_v4i32(<4 x i32>
define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: max_gt_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
@@ -241,19 +241,19 @@ define <8 x i32> @max_gt_v8i32(<8 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm2, %xmm0
; SSE41-NEXT: pmaxsd %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsd %xmm2, %xmm0
; SSE42-NEXT: pmaxsd %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
@@ -262,12 +262,12 @@ define <8 x i32> @max_gt_v8i32(<8 x i32>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sgt <8 x i32> %a, %b
@@ -277,12 +277,12 @@ define <8 x i32> @max_gt_v8i32(<8 x i32>
define <8 x i16> @max_gt_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: max_gt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <8 x i16> %a, %b
@@ -292,13 +292,13 @@ define <8 x i16> @max_gt_v8i16(<8 x i16>
define <16 x i16> @max_gt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: max_gt_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxsw %xmm2, %xmm0
; SSE-NEXT: pmaxsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: max_gt_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
@@ -307,12 +307,12 @@ define <16 x i16> @max_gt_v16i16(<16 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sgt <16 x i16> %a, %b
@@ -322,7 +322,7 @@ define <16 x i16> @max_gt_v16i16(<16 x i
define <16 x i8> @max_gt_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: max_gt_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -332,17 +332,17 @@ define <16 x i8> @max_gt_v16i8(<16 x i8>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v16i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsb %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_gt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <16 x i8> %a, %b
@@ -352,7 +352,7 @@ define <16 x i8> @max_gt_v16i8(<16 x i8>
define <32 x i8> @max_gt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: max_gt_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtb %xmm3, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
@@ -368,19 +368,19 @@ define <32 x i8> @max_gt_v32i8(<32 x i8>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v32i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsb %xmm2, %xmm0
; SSE41-NEXT: pmaxsb %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v32i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsb %xmm2, %xmm0
; SSE42-NEXT: pmaxsb %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
@@ -389,12 +389,12 @@ define <32 x i8> @max_gt_v32i8(<32 x i8>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sgt <32 x i8> %a, %b
@@ -408,7 +408,7 @@ define <32 x i8> @max_gt_v32i8(<32 x i8>
define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: max_ge_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -430,7 +430,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -451,7 +451,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm1, %xmm3
; SSE42-NEXT: pcmpgtq %xmm2, %xmm3
@@ -462,7 +462,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -470,7 +470,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -478,7 +478,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
@@ -492,7 +492,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: max_ge_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -532,7 +532,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm1, %xmm5
@@ -569,7 +569,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa %xmm3, %xmm5
; SSE42-NEXT: pcmpgtq %xmm1, %xmm5
@@ -586,7 +586,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -599,7 +599,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
@@ -607,7 +607,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
@@ -620,7 +620,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
define <4 x i32> @max_ge_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: max_ge_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
@@ -632,17 +632,17 @@ define <4 x i32> @max_ge_v4i32(<4 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_ge_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sge <4 x i32> %a, %b
@@ -652,7 +652,7 @@ define <4 x i32> @max_ge_v4i32(<4 x i32>
define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: max_ge_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
@@ -672,19 +672,19 @@ define <8 x i32> @max_ge_v8i32(<8 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsd %xmm2, %xmm0
; SSE41-NEXT: pmaxsd %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsd %xmm2, %xmm0
; SSE42-NEXT: pmaxsd %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
@@ -693,12 +693,12 @@ define <8 x i32> @max_ge_v8i32(<8 x i32>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sge <8 x i32> %a, %b
@@ -708,12 +708,12 @@ define <8 x i32> @max_ge_v8i32(<8 x i32>
define <8 x i16> @max_ge_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: max_ge_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sge <8 x i16> %a, %b
@@ -723,13 +723,13 @@ define <8 x i16> @max_ge_v8i16(<8 x i16>
define <16 x i16> @max_ge_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: max_ge_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxsw %xmm2, %xmm0
; SSE-NEXT: pmaxsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: max_ge_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
@@ -738,12 +738,12 @@ define <16 x i16> @max_ge_v16i16(<16 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sge <16 x i16> %a, %b
@@ -753,7 +753,7 @@ define <16 x i16> @max_ge_v16i16(<16 x i
define <16 x i8> @max_ge_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: max_ge_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtb %xmm0, %xmm3
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
@@ -765,17 +765,17 @@ define <16 x i8> @max_ge_v16i8(<16 x i8>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v16i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsb %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_ge_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sge <16 x i8> %a, %b
@@ -785,7 +785,7 @@ define <16 x i8> @max_ge_v16i8(<16 x i8>
define <32 x i8> @max_ge_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: max_ge_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pcmpgtb %xmm1, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
@@ -805,19 +805,19 @@ define <32 x i8> @max_ge_v32i8(<32 x i8>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v32i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxsb %xmm2, %xmm0
; SSE41-NEXT: pmaxsb %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v32i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxsb %xmm2, %xmm0
; SSE42-NEXT: pmaxsb %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
@@ -826,12 +826,12 @@ define <32 x i8> @max_ge_v32i8(<32 x i8>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sge <32 x i8> %a, %b
@@ -845,7 +845,7 @@ define <32 x i8> @max_ge_v32i8(<32 x i8>
define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: min_lt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -864,7 +864,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -883,7 +883,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm1, %xmm0
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
@@ -892,19 +892,19 @@ define <2 x i64> @min_lt_v2i64(<2 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
@@ -918,7 +918,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64>
define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: min_lt_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -952,7 +952,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm1, %xmm5
@@ -986,7 +986,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa %xmm3, %xmm5
; SSE42-NEXT: pcmpgtq %xmm1, %xmm5
@@ -1000,7 +1000,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1010,13 +1010,13 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
@@ -1029,7 +1029,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
define <4 x i32> @min_lt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: min_lt_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -1038,17 +1038,17 @@ define <4 x i32> @min_lt_v4i32(<4 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_lt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp slt <4 x i32> %a, %b
@@ -1058,7 +1058,7 @@ define <4 x i32> @min_lt_v4i32(<4 x i32>
define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: min_lt_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -1072,19 +1072,19 @@ define <8 x i32> @min_lt_v8i32(<8 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsd %xmm2, %xmm0
; SSE41-NEXT: pminsd %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsd %xmm2, %xmm0
; SSE42-NEXT: pminsd %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
@@ -1093,12 +1093,12 @@ define <8 x i32> @min_lt_v8i32(<8 x i32>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp slt <8 x i32> %a, %b
@@ -1108,12 +1108,12 @@ define <8 x i32> @min_lt_v8i32(<8 x i32>
define <8 x i16> @min_lt_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: min_lt_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp slt <8 x i16> %a, %b
@@ -1123,13 +1123,13 @@ define <8 x i16> @min_lt_v8i16(<8 x i16>
define <16 x i16> @min_lt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: min_lt_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminsw %xmm2, %xmm0
; SSE-NEXT: pminsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: min_lt_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
@@ -1138,12 +1138,12 @@ define <16 x i16> @min_lt_v16i16(<16 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp slt <16 x i16> %a, %b
@@ -1153,7 +1153,7 @@ define <16 x i16> @min_lt_v16i16(<16 x i
define <16 x i8> @min_lt_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: min_lt_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm0
@@ -1162,17 +1162,17 @@ define <16 x i8> @min_lt_v16i8(<16 x i8>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v16i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsb %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_lt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp slt <16 x i8> %a, %b
@@ -1182,7 +1182,7 @@ define <16 x i8> @min_lt_v16i8(<16 x i8>
define <32 x i8> @min_lt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: min_lt_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtb %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
@@ -1196,19 +1196,19 @@ define <32 x i8> @min_lt_v32i8(<32 x i8>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v32i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsb %xmm2, %xmm0
; SSE41-NEXT: pminsb %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v32i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsb %xmm2, %xmm0
; SSE42-NEXT: pminsb %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
@@ -1217,12 +1217,12 @@ define <32 x i8> @min_lt_v32i8(<32 x i8>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp slt <32 x i8> %a, %b
@@ -1236,7 +1236,7 @@ define <32 x i8> @min_lt_v32i8(<32 x i8>
define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: min_le_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1258,7 +1258,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm1, %xmm3
@@ -1279,7 +1279,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: pcmpeqd %xmm3, %xmm3
@@ -1289,7 +1289,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1297,7 +1297,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1305,7 +1305,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
@@ -1319,7 +1319,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: min_le_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,0,2147483648,0]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -1359,7 +1359,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,0,2147483648,0]
; SSE41-NEXT: movdqa %xmm3, %xmm5
@@ -1396,7 +1396,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa %xmm1, %xmm5
; SSE42-NEXT: pcmpgtq %xmm3, %xmm5
@@ -1412,7 +1412,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -1425,7 +1425,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
@@ -1433,7 +1433,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
@@ -1446,7 +1446,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
define <4 x i32> @min_le_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: min_le_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
@@ -1458,17 +1458,17 @@ define <4 x i32> @min_le_v4i32(<4 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_le_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sle <4 x i32> %a, %b
@@ -1478,7 +1478,7 @@ define <4 x i32> @min_le_v4i32(<4 x i32>
define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: min_le_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
@@ -1498,19 +1498,19 @@ define <8 x i32> @min_le_v8i32(<8 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsd %xmm2, %xmm0
; SSE41-NEXT: pminsd %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsd %xmm2, %xmm0
; SSE42-NEXT: pminsd %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
@@ -1519,12 +1519,12 @@ define <8 x i32> @min_le_v8i32(<8 x i32>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sle <8 x i32> %a, %b
@@ -1534,12 +1534,12 @@ define <8 x i32> @min_le_v8i32(<8 x i32>
define <8 x i16> @min_le_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: min_le_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sle <8 x i16> %a, %b
@@ -1549,13 +1549,13 @@ define <8 x i16> @min_le_v8i16(<8 x i16>
define <16 x i16> @min_le_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: min_le_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminsw %xmm2, %xmm0
; SSE-NEXT: pminsw %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: min_le_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
@@ -1564,12 +1564,12 @@ define <16 x i16> @min_le_v16i16(<16 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sle <16 x i16> %a, %b
@@ -1579,7 +1579,7 @@ define <16 x i16> @min_le_v16i16(<16 x i
define <16 x i8> @min_le_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: min_le_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
@@ -1591,17 +1591,17 @@ define <16 x i8> @min_le_v16i8(<16 x i8>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v16i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsb %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_le_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sle <16 x i8> %a, %b
@@ -1611,7 +1611,7 @@ define <16 x i8> @min_le_v16i8(<16 x i8>
define <32 x i8> @min_le_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE2-LABEL: min_le_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pcmpgtb %xmm3, %xmm6
; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
@@ -1631,19 +1631,19 @@ define <32 x i8> @min_le_v32i8(<32 x i8>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v32i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminsb %xmm2, %xmm0
; SSE41-NEXT: pminsb %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v32i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminsb %xmm2, %xmm0
; SSE42-NEXT: pminsb %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
@@ -1652,12 +1652,12 @@ define <32 x i8> @min_le_v32i8(<32 x i8>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sle <32 x i8> %a, %b
@@ -1671,12 +1671,12 @@ define <32 x i8> @min_le_v32i8(<32 x i8>
define <2 x i64> @max_gt_v2i64c() {
; SSE-LABEL: max_gt_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551615,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551615,7]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -1688,13 +1688,13 @@ define <2 x i64> @max_gt_v2i64c() {
define <4 x i64> @max_gt_v4i64c() {
; SSE-LABEL: max_gt_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,7]
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -1706,12 +1706,12 @@ define <4 x i64> @max_gt_v4i64c() {
define <4 x i32> @max_gt_v4i32c() {
; SSE-LABEL: max_gt_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -1723,13 +1723,13 @@ define <4 x i32> @max_gt_v4i32c() {
define <8 x i32> @max_gt_v8i32c() {
; SSE-LABEL: max_gt_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967293,4294967293,4294967295]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967295,4294967293,4294967293,4294967295,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -1741,12 +1741,12 @@ define <8 x i32> @max_gt_v8i32c() {
define <8 x i16> @max_gt_v8i16c() {
; SSE-LABEL: max_gt_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -1758,13 +1758,13 @@ define <8 x i16> @max_gt_v8i16c() {
define <16 x i16> @max_gt_v16i16c() {
; SSE-LABEL: max_gt_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65534,65533,65532,65533,65534,65535,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65535,65534,65533,65532,65533,65534,65535,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -1776,12 +1776,12 @@ define <16 x i16> @max_gt_v16i16c() {
define <16 x i8> @max_gt_v16i8c() {
; SSE-LABEL: max_gt_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -1793,12 +1793,12 @@ define <16 x i8> @max_gt_v16i8c() {
define <2 x i64> @max_ge_v2i64c() {
; SSE-LABEL: max_ge_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551615,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551615,7]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -1810,13 +1810,13 @@ define <2 x i64> @max_ge_v2i64c() {
define <4 x i64> @max_ge_v4i64c() {
; SSE-LABEL: max_ge_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,7]
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -1828,12 +1828,12 @@ define <4 x i64> @max_ge_v4i64c() {
define <4 x i32> @max_ge_v4i32c() {
; SSE-LABEL: max_ge_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -1845,13 +1845,13 @@ define <4 x i32> @max_ge_v4i32c() {
define <8 x i32> @max_ge_v8i32c() {
; SSE-LABEL: max_ge_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967293,4294967293,4294967295]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967295,4294967293,4294967293,4294967295,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -1863,12 +1863,12 @@ define <8 x i32> @max_ge_v8i32c() {
define <8 x i16> @max_ge_v8i16c() {
; SSE-LABEL: max_ge_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -1880,13 +1880,13 @@ define <8 x i16> @max_ge_v8i16c() {
define <16 x i16> @max_ge_v16i16c() {
; SSE-LABEL: max_ge_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65534,65533,65532,65533,65534,65535,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65535,65534,65533,65532,65533,65534,65535,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -1898,12 +1898,12 @@ define <16 x i16> @max_ge_v16i16c() {
define <16 x i8> @max_ge_v16i8c() {
; SSE-LABEL: max_ge_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -1915,12 +1915,12 @@ define <16 x i8> @max_ge_v16i8c() {
define <2 x i64> @min_lt_v2i64c() {
; SSE-LABEL: min_lt_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551609,1]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -1932,13 +1932,13 @@ define <2 x i64> @min_lt_v2i64c() {
define <4 x i64> @min_lt_v4i64c() {
; SSE-LABEL: min_lt_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,18446744073709551609]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551609,18446744073709551609,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -1950,12 +1950,12 @@ define <4 x i64> @min_lt_v4i64c() {
define <4 x i32> @min_lt_v4i32c() {
; SSE-LABEL: min_lt_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -1967,13 +1967,13 @@ define <4 x i32> @min_lt_v4i32c() {
define <8 x i32> @min_lt_v8i32c() {
; SSE-LABEL: min_lt_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967291,4294967291,4294967289]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967289,4294967291,4294967291,4294967289,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -1985,12 +1985,12 @@ define <8 x i32> @min_lt_v8i32c() {
define <8 x i16> @min_lt_v8i16c() {
; SSE-LABEL: min_lt_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -2002,13 +2002,13 @@ define <8 x i16> @min_lt_v8i16c() {
define <16 x i16> @min_lt_v16i16c() {
; SSE-LABEL: min_lt_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65530,65531,65532,65531,65530,65529,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65529,65530,65531,65532,65531,65530,65529,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -2020,12 +2020,12 @@ define <16 x i16> @min_lt_v16i16c() {
define <16 x i8> @min_lt_v16i8c() {
; SSE-LABEL: min_lt_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -2037,12 +2037,12 @@ define <16 x i8> @min_lt_v16i8c() {
define <2 x i64> @min_le_v2i64c() {
; SSE-LABEL: min_le_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551609,1]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -2054,13 +2054,13 @@ define <2 x i64> @min_le_v2i64c() {
define <4 x i64> @min_le_v4i64c() {
; SSE-LABEL: min_le_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,18446744073709551609]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551609,18446744073709551609,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -2072,12 +2072,12 @@ define <4 x i64> @min_le_v4i64c() {
define <4 x i32> @min_le_v4i32c() {
; SSE-LABEL: min_le_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -2089,13 +2089,13 @@ define <4 x i32> @min_le_v4i32c() {
define <8 x i32> @min_le_v8i32c() {
; SSE-LABEL: min_le_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967291,4294967291,4294967289]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967289,4294967291,4294967291,4294967289,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -2107,12 +2107,12 @@ define <8 x i32> @min_le_v8i32c() {
define <8 x i16> @min_le_v8i16c() {
; SSE-LABEL: min_le_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -2124,13 +2124,13 @@ define <8 x i16> @min_le_v8i16c() {
define <16 x i16> @min_le_v16i16c() {
; SSE-LABEL: min_le_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65530,65531,65532,65531,65530,65529,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65529,65530,65531,65532,65531,65530,65529,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -2142,12 +2142,12 @@ define <16 x i16> @min_le_v16i16c() {
define <16 x i8> @min_le_v16i8c() {
; SSE-LABEL: min_le_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
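
(For reference, every non-constant test in vec_minmax_sint.ll follows the same icmp + select idiom; a minimal sketch of the signed-max case, reconstructed from the tests above rather than taken from the patch, is below. On SSE4.1 and later this is recognized as SMAX and lowers to the single pmaxsd the CHECK lines expect; SSE2 falls back to the pcmpgtd/pand/pandn blend also visible in the diff.)

  define <4 x i32> @max_gt_sketch(<4 x i32> %a, <4 x i32> %b) {
    ; sketch only: signed greater-than compare selecting the larger lane
    %cmp = icmp sgt <4 x i32> %a, %b
    %r = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
    ret <4 x i32> %r
  }
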
Modified: llvm/trunk/test/CodeGen/X86/vec_minmax_uint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_minmax_uint.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_minmax_uint.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_minmax_uint.ll Mon Dec 4 09:18:51 2017
@@ -13,7 +13,7 @@
define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: max_gt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -32,7 +32,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm3
@@ -51,7 +51,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm1, %xmm3
@@ -63,7 +63,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -72,7 +72,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -81,7 +81,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
@@ -95,7 +95,7 @@ define <2 x i64> @max_gt_v2i64(<2 x i64>
define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: max_gt_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -129,7 +129,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm3, %xmm5
@@ -163,7 +163,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm3, %xmm6
@@ -183,7 +183,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -198,7 +198,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
@@ -207,7 +207,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
@@ -220,7 +220,7 @@ define <4 x i64> @max_gt_v4i64(<4 x i64>
define <4 x i32> @max_gt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: max_gt_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -232,17 +232,17 @@ define <4 x i32> @max_gt_v4i32(<4 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxud %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_gt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ugt <4 x i32> %a, %b
@@ -252,7 +252,7 @@ define <4 x i32> @max_gt_v4i32(<4 x i32>
define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: max_gt_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pxor %xmm5, %xmm6
@@ -273,19 +273,19 @@ define <8 x i32> @max_gt_v8i32(<8 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm2, %xmm0
; SSE41-NEXT: pmaxud %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxud %xmm2, %xmm0
; SSE42-NEXT: pmaxud %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
@@ -294,12 +294,12 @@ define <8 x i32> @max_gt_v8i32(<8 x i32>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ugt <8 x i32> %a, %b
@@ -309,7 +309,7 @@ define <8 x i32> @max_gt_v8i32(<8 x i32>
define <8 x i16> @max_gt_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: max_gt_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -321,17 +321,17 @@ define <8 x i16> @max_gt_v8i16(<8 x i16>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxuw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_gt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ugt <8 x i16> %a, %b
@@ -341,7 +341,7 @@ define <8 x i16> @max_gt_v8i16(<8 x i16>
define <16 x i16> @max_gt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: max_gt_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pxor %xmm5, %xmm6
@@ -362,19 +362,19 @@ define <16 x i16> @max_gt_v16i16(<16 x i
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_gt_v16i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm2, %xmm0
; SSE41-NEXT: pmaxuw %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_gt_v16i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxuw %xmm2, %xmm0
; SSE42-NEXT: pmaxuw %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_gt_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
@@ -383,12 +383,12 @@ define <16 x i16> @max_gt_v16i16(<16 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ugt <16 x i16> %a, %b
@@ -398,12 +398,12 @@ define <16 x i16> @max_gt_v16i16(<16 x i
define <16 x i8> @max_gt_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: max_gt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ugt <16 x i8> %a, %b
@@ -413,13 +413,13 @@ define <16 x i8> @max_gt_v16i8(<16 x i8>
define <32 x i8> @max_gt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: max_gt_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm2, %xmm0
; SSE-NEXT: pmaxub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: max_gt_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
@@ -428,12 +428,12 @@ define <32 x i8> @max_gt_v32i8(<32 x i8>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_gt_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_gt_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ugt <32 x i8> %a, %b
@@ -447,7 +447,7 @@ define <32 x i8> @max_gt_v32i8(<32 x i8>
define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: max_ge_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -469,7 +469,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -490,7 +490,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: pxor %xmm3, %xmm0
@@ -503,7 +503,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -514,7 +514,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -525,7 +525,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
@@ -539,7 +539,7 @@ define <2 x i64> @max_ge_v2i64(<2 x i64>
define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: max_ge_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -579,7 +579,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm5
@@ -616,7 +616,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm1, %xmm6
@@ -639,7 +639,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -657,7 +657,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
@@ -668,7 +668,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
@@ -681,7 +681,7 @@ define <4 x i64> @max_ge_v4i64(<4 x i64>
define <4 x i32> @max_ge_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: max_ge_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
@@ -696,17 +696,17 @@ define <4 x i32> @max_ge_v4i32(<4 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxud %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_ge_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp uge <4 x i32> %a, %b
@@ -716,7 +716,7 @@ define <4 x i32> @max_ge_v4i32(<4 x i32>
define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: max_ge_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm4
@@ -742,19 +742,19 @@ define <8 x i32> @max_ge_v8i32(<8 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm2, %xmm0
; SSE41-NEXT: pmaxud %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxud %xmm2, %xmm0
; SSE42-NEXT: pmaxud %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
@@ -763,12 +763,12 @@ define <8 x i32> @max_ge_v8i32(<8 x i32>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp uge <8 x i32> %a, %b
@@ -778,7 +778,7 @@ define <8 x i32> @max_ge_v8i32(<8 x i32>
define <8 x i16> @max_ge_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: max_ge_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psubusw %xmm0, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
@@ -789,17 +789,17 @@ define <8 x i16> @max_ge_v8i16(<8 x i16>
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxuw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: max_ge_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp uge <8 x i16> %a, %b
@@ -809,7 +809,7 @@ define <8 x i16> @max_ge_v8i16(<8 x i16>
define <16 x i16> @max_ge_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: max_ge_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psubusw %xmm1, %xmm4
; SSE2-NEXT: pxor %xmm5, %xmm5
@@ -826,19 +826,19 @@ define <16 x i16> @max_ge_v16i16(<16 x i
; SSE2-NEXT: retq
;
; SSE41-LABEL: max_ge_v16i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm2, %xmm0
; SSE41-NEXT: pmaxuw %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: max_ge_v16i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pmaxuw %xmm2, %xmm0
; SSE42-NEXT: pmaxuw %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: max_ge_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
@@ -847,12 +847,12 @@ define <16 x i16> @max_ge_v16i16(<16 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp uge <16 x i16> %a, %b
@@ -862,12 +862,12 @@ define <16 x i16> @max_ge_v16i16(<16 x i
define <16 x i8> @max_ge_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: max_ge_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp uge <16 x i8> %a, %b
@@ -877,13 +877,13 @@ define <16 x i8> @max_ge_v16i8(<16 x i8>
define <32 x i8> @max_ge_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: max_ge_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm2, %xmm0
; SSE-NEXT: pmaxub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: max_ge_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
@@ -892,12 +892,12 @@ define <32 x i8> @max_ge_v32i8(<32 x i8>
; AVX1-NEXT: retq
;
; AVX2-LABEL: max_ge_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: max_ge_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp uge <32 x i8> %a, %b
@@ -911,7 +911,7 @@ define <32 x i8> @max_ge_v32i8(<32 x i8>
define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: min_lt_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -930,7 +930,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm2, %xmm3
@@ -949,7 +949,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm2, %xmm3
@@ -961,7 +961,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -970,7 +970,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
@@ -979,7 +979,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
@@ -993,7 +993,7 @@ define <2 x i64> @min_lt_v2i64(<2 x i64>
define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: min_lt_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1027,7 +1027,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm5
@@ -1061,7 +1061,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm1, %xmm6
@@ -1081,7 +1081,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1096,7 +1096,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
@@ -1105,7 +1105,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
@@ -1118,7 +1118,7 @@ define <4 x i64> @min_lt_v4i64(<4 x i64>
define <4 x i32> @min_lt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: min_lt_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1130,17 +1130,17 @@ define <4 x i32> @min_lt_v4i32(<4 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminud %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_lt_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ult <4 x i32> %a, %b
@@ -1150,7 +1150,7 @@ define <4 x i32> @min_lt_v4i32(<4 x i32>
define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: min_lt_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1170,19 +1170,19 @@ define <8 x i32> @min_lt_v8i32(<8 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm2, %xmm0
; SSE41-NEXT: pminud %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminud %xmm2, %xmm0
; SSE42-NEXT: pminud %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
@@ -1191,12 +1191,12 @@ define <8 x i32> @min_lt_v8i32(<8 x i32>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ult <8 x i32> %a, %b
@@ -1206,7 +1206,7 @@ define <8 x i32> @min_lt_v8i32(<8 x i32>
define <8 x i16> @min_lt_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: min_lt_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1218,17 +1218,17 @@ define <8 x i16> @min_lt_v8i16(<8 x i16>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminuw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_lt_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ult <8 x i16> %a, %b
@@ -1238,7 +1238,7 @@ define <8 x i16> @min_lt_v8i16(<8 x i16>
define <16 x i16> @min_lt_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: min_lt_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
@@ -1258,19 +1258,19 @@ define <16 x i16> @min_lt_v16i16(<16 x i
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_lt_v16i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm2, %xmm0
; SSE41-NEXT: pminuw %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_lt_v16i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminuw %xmm2, %xmm0
; SSE42-NEXT: pminuw %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_lt_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
@@ -1279,12 +1279,12 @@ define <16 x i16> @min_lt_v16i16(<16 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ult <16 x i16> %a, %b
@@ -1294,12 +1294,12 @@ define <16 x i16> @min_lt_v16i16(<16 x i
define <16 x i8> @min_lt_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: min_lt_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ult <16 x i8> %a, %b
@@ -1309,13 +1309,13 @@ define <16 x i8> @min_lt_v16i8(<16 x i8>
define <32 x i8> @min_lt_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: min_lt_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm2, %xmm0
; SSE-NEXT: pminub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: min_lt_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
@@ -1324,12 +1324,12 @@ define <32 x i8> @min_lt_v32i8(<32 x i8>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_lt_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_lt_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ult <32 x i8> %a, %b
@@ -1343,7 +1343,7 @@ define <32 x i8> @min_lt_v32i8(<32 x i8>
define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: min_le_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
@@ -1365,7 +1365,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm3
@@ -1386,7 +1386,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm1, %xmm0
@@ -1400,7 +1400,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v2i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -1411,7 +1411,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v2i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
@@ -1422,7 +1422,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v2i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
@@ -1436,7 +1436,7 @@ define <2 x i64> @min_le_v2i64(<2 x i64>
define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: min_le_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm7, %xmm4
@@ -1476,7 +1476,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v4i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm3, %xmm5
@@ -1513,7 +1513,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT: movdqa %xmm3, %xmm6
@@ -1536,7 +1536,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
@@ -1554,7 +1554,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
@@ -1565,7 +1565,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
@@ -1578,7 +1578,7 @@ define <4 x i64> @min_le_v4i64(<4 x i64>
define <4 x i32> @min_le_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: min_le_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
@@ -1593,17 +1593,17 @@ define <4 x i32> @min_le_v4i32(<4 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v4i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminud %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_le_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ule <4 x i32> %a, %b
@@ -1613,7 +1613,7 @@ define <4 x i32> @min_le_v4i32(<4 x i32>
define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-LABEL: min_le_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm4
@@ -1639,19 +1639,19 @@ define <8 x i32> @min_le_v8i32(<8 x i32>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v8i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm2, %xmm0
; SSE41-NEXT: pminud %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v8i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminud %xmm2, %xmm0
; SSE42-NEXT: pminud %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
@@ -1660,12 +1660,12 @@ define <8 x i32> @min_le_v8i32(<8 x i32>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ule <8 x i32> %a, %b
@@ -1675,7 +1675,7 @@ define <8 x i32> @min_le_v8i32(<8 x i32>
define <8 x i16> @min_le_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: min_le_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psubusw %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
@@ -1686,17 +1686,17 @@ define <8 x i16> @min_le_v8i16(<8 x i16>
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v8i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminuw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: min_le_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ule <8 x i16> %a, %b
@@ -1706,7 +1706,7 @@ define <8 x i16> @min_le_v8i16(<8 x i16>
define <16 x i16> @min_le_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-LABEL: min_le_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psubusw %xmm3, %xmm4
; SSE2-NEXT: pxor %xmm6, %xmm6
@@ -1725,19 +1725,19 @@ define <16 x i16> @min_le_v16i16(<16 x i
; SSE2-NEXT: retq
;
; SSE41-LABEL: min_le_v16i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm2, %xmm0
; SSE41-NEXT: pminuw %xmm3, %xmm1
; SSE41-NEXT: retq
;
; SSE42-LABEL: min_le_v16i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pminuw %xmm2, %xmm0
; SSE42-NEXT: pminuw %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: min_le_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
@@ -1746,12 +1746,12 @@ define <16 x i16> @min_le_v16i16(<16 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ule <16 x i16> %a, %b
@@ -1761,12 +1761,12 @@ define <16 x i16> @min_le_v16i16(<16 x i
define <16 x i8> @min_le_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: min_le_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp ule <16 x i8> %a, %b
@@ -1776,13 +1776,13 @@ define <16 x i8> @min_le_v16i8(<16 x i8>
define <32 x i8> @min_le_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: min_le_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm2, %xmm0
; SSE-NEXT: pminub %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: min_le_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
@@ -1791,12 +1791,12 @@ define <32 x i8> @min_le_v32i8(<32 x i8>
; AVX1-NEXT: retq
;
; AVX2-LABEL: min_le_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: min_le_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp ule <32 x i8> %a, %b
@@ -1810,12 +1810,12 @@ define <32 x i8> @min_le_v32i8(<32 x i8>
define <2 x i64> @max_gt_v2i64c() {
; SSE-LABEL: max_gt_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551615,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551615,7]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -1827,13 +1827,13 @@ define <2 x i64> @max_gt_v2i64c() {
define <4 x i64> @max_gt_v4i64c() {
; SSE-LABEL: max_gt_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,7]
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -1845,12 +1845,12 @@ define <4 x i64> @max_gt_v4i64c() {
define <4 x i32> @max_gt_v4i32c() {
; SSE-LABEL: max_gt_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -1862,13 +1862,13 @@ define <4 x i32> @max_gt_v4i32c() {
define <8 x i32> @max_gt_v8i32c() {
; SSE-LABEL: max_gt_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967293,4294967293,4294967295]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967295,4294967293,4294967293,4294967295,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -1880,12 +1880,12 @@ define <8 x i32> @max_gt_v8i32c() {
define <8 x i16> @max_gt_v8i16c() {
; SSE-LABEL: max_gt_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -1897,13 +1897,13 @@ define <8 x i16> @max_gt_v8i16c() {
define <16 x i16> @max_gt_v16i16c() {
; SSE-LABEL: max_gt_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65534,65533,65532,65533,65534,65535,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65535,65534,65533,65532,65533,65534,65535,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -1915,12 +1915,12 @@ define <16 x i16> @max_gt_v16i16c() {
define <16 x i8> @max_gt_v16i8c() {
; SSE-LABEL: max_gt_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_gt_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -1932,12 +1932,12 @@ define <16 x i8> @max_gt_v16i8c() {
define <2 x i64> @max_ge_v2i64c() {
; SSE-LABEL: max_ge_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551615,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551615,7]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -1949,13 +1949,13 @@ define <2 x i64> @max_ge_v2i64c() {
define <4 x i64> @max_ge_v4i64c() {
; SSE-LABEL: max_ge_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,7]
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -1967,12 +1967,12 @@ define <4 x i64> @max_ge_v4i64c() {
define <4 x i32> @max_ge_v4i32c() {
; SSE-LABEL: max_ge_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -1984,13 +1984,13 @@ define <4 x i32> @max_ge_v4i32c() {
define <8 x i32> @max_ge_v8i32c() {
; SSE-LABEL: max_ge_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967293,4294967293,4294967295]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967295,4294967293,4294967293,4294967295,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -2002,12 +2002,12 @@ define <8 x i32> @max_ge_v8i32c() {
define <8 x i16> @max_ge_v8i16c() {
; SSE-LABEL: max_ge_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -2019,13 +2019,13 @@ define <8 x i16> @max_ge_v8i16c() {
define <16 x i16> @max_ge_v16i16c() {
; SSE-LABEL: max_ge_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65534,65533,65532,65533,65534,65535,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65535,65534,65533,65532,65533,65534,65535,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -2037,12 +2037,12 @@ define <16 x i16> @max_ge_v16i16c() {
define <16 x i8> @max_ge_v16i8c() {
; SSE-LABEL: max_ge_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; SSE-NEXT: retq
;
; AVX-LABEL: max_ge_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -2054,12 +2054,12 @@ define <16 x i8> @max_ge_v16i8c() {
define <2 x i64> @min_lt_v2i64c() {
; SSE-LABEL: min_lt_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551609,1]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -2071,13 +2071,13 @@ define <2 x i64> @min_lt_v2i64c() {
define <4 x i64> @min_lt_v4i64c() {
; SSE-LABEL: min_lt_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,18446744073709551609]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551609,18446744073709551609,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -2089,12 +2089,12 @@ define <4 x i64> @min_lt_v4i64c() {
define <4 x i32> @min_lt_v4i32c() {
; SSE-LABEL: min_lt_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -2106,13 +2106,13 @@ define <4 x i32> @min_lt_v4i32c() {
define <8 x i32> @min_lt_v8i32c() {
; SSE-LABEL: min_lt_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967291,4294967291,4294967289]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967289,4294967291,4294967291,4294967289,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -2124,12 +2124,12 @@ define <8 x i32> @min_lt_v8i32c() {
define <8 x i16> @min_lt_v8i16c() {
; SSE-LABEL: min_lt_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,65531,65531,65529,1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,65531,65531,65529,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -2141,13 +2141,13 @@ define <8 x i16> @min_lt_v8i16c() {
define <16 x i16> @min_lt_v16i16c() {
; SSE-LABEL: min_lt_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,65530,65531,65532,65531,65530,65529,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,65530,65531,65532,65531,65530,65529,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -2159,12 +2159,12 @@ define <16 x i16> @min_lt_v16i16c() {
define <16 x i8> @min_lt_v16i8c() {
; SSE-LABEL: min_lt_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_lt_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
@@ -2176,12 +2176,12 @@ define <16 x i8> @min_lt_v16i8c() {
define <2 x i64> @min_le_v2i64c() {
; SSE-LABEL: min_le_v2i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v2i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551609,1]
; AVX-NEXT: retq
%1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
@@ -2193,13 +2193,13 @@ define <2 x i64> @min_le_v2i64c() {
define <4 x i64> @min_le_v4i64c() {
; SSE-LABEL: min_le_v4i64c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551609,18446744073709551609]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v4i64c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551609,18446744073709551609,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
@@ -2211,12 +2211,12 @@ define <4 x i64> @min_le_v4i64c() {
define <4 x i32> @min_le_v4i32c() {
; SSE-LABEL: min_le_v4i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v4i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; AVX-NEXT: retq
%1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
@@ -2228,13 +2228,13 @@ define <4 x i32> @min_le_v4i32c() {
define <8 x i32> @min_le_v8i32c() {
; SSE-LABEL: min_le_v8i32c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967289,4294967291,4294967291,4294967289]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v8i32c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967289,4294967291,4294967291,4294967289,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
@@ -2246,12 +2246,12 @@ define <8 x i32> @min_le_v8i32c() {
define <8 x i16> @min_le_v8i16c() {
; SSE-LABEL: min_le_v8i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v8i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
@@ -2263,13 +2263,13 @@ define <8 x i16> @min_le_v8i16c() {
define <16 x i16> @min_le_v16i16c() {
; SSE-LABEL: min_le_v16i16c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [65529,65530,65531,65532,65531,65530,65529,0]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v16i16c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65529,65530,65531,65532,65531,65530,65529,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
@@ -2281,12 +2281,12 @@ define <16 x i16> @min_le_v16i16c() {
define <16 x i8> @min_le_v16i8c() {
; SSE-LABEL: min_le_v16i8c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; SSE-NEXT: retq
;
; AVX-LABEL: min_le_v16i8c:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; AVX-NEXT: retq
%1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
Modified: llvm/trunk/test/CodeGen/X86/vec_partial.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_partial.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_partial.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_partial.ll Mon Dec 4 09:18:51 2017
@@ -5,12 +5,12 @@
; PR11580
define <3 x float> @addf3(<3 x float> %x) {
; X86-LABEL: addf3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: addps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: addf3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: addps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
entry:
@@ -21,11 +21,11 @@ entry:
; PR11580
define <4 x float> @cvtf3_f4(<3 x float> %x) {
; X86-LABEL: cvtf3_f4:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: retl
;
; X64-LABEL: cvtf3_f4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%extractVec = shufflevector <3 x float> %x, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
@@ -35,11 +35,11 @@ entry:
; PR11580
define <3 x float> @cvtf4_f3(<4 x float> %x) {
; X86-LABEL: cvtf4_f3:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: retl
;
; X64-LABEL: cvtf4_f3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%extractVec = shufflevector <4 x float> %x, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
Modified: llvm/trunk/test/CodeGen/X86/vec_reassociate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_reassociate.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_reassociate.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_reassociate.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: add_4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: add_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddd %xmm1, %xmm0
; X64-NEXT: retq
%1 = add <4 x i32> %a0, <i32 1, i32 -2, i32 3, i32 -4>
@@ -20,12 +20,12 @@ define <4 x i32> @add_4i32(<4 x i32> %a0
define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: add_4i32_commute:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: add_4i32_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: paddd %xmm1, %xmm0
; X64-NEXT: retq
%1 = add <4 x i32> <i32 1, i32 -2, i32 3, i32 -4>, %a0
@@ -36,13 +36,13 @@ define <4 x i32> @add_4i32_commute(<4 x
define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: mul_4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld %xmm1, %xmm0
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld %xmm1, %xmm0
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -54,13 +54,13 @@ define <4 x i32> @mul_4i32(<4 x i32> %a0
define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: mul_4i32_commute:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld %xmm1, %xmm0
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_4i32_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld %xmm1, %xmm0
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -72,13 +72,13 @@ define <4 x i32> @mul_4i32_commute(<4 x
define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: and_4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: andps %xmm1, %xmm0
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: and_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps %xmm1, %xmm0
; X64-NEXT: andps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -90,13 +90,13 @@ define <4 x i32> @and_4i32(<4 x i32> %a0
define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: and_4i32_commute:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: andps %xmm1, %xmm0
; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: and_4i32_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andps %xmm1, %xmm0
; X64-NEXT: andps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -108,13 +108,13 @@ define <4 x i32> @and_4i32_commute(<4 x
define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: or_4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: orps %xmm1, %xmm0
; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: or_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orps %xmm1, %xmm0
; X64-NEXT: orps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -126,13 +126,13 @@ define <4 x i32> @or_4i32(<4 x i32> %a0,
define <4 x i32> @or_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: or_4i32_commute:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: orps %xmm1, %xmm0
; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: or_4i32_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: orps %xmm1, %xmm0
; X64-NEXT: orps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -144,13 +144,13 @@ define <4 x i32> @or_4i32_commute(<4 x i
define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: xor_4i32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: xorps %xmm1, %xmm0
; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: xor_4i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm0
; X64-NEXT: xorps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
@@ -162,13 +162,13 @@ define <4 x i32> @xor_4i32(<4 x i32> %a0
define <4 x i32> @xor_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: xor_4i32_commute:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: xorps %xmm1, %xmm0
; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: xor_4i32_commute:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm0
; X64-NEXT: xorps {{.*}}(%rip), %xmm0
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vec_return.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_return.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_return.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_return.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
; Without any typed operations, always use the smaller xorps.
define <2 x double> @test() {
; CHECK-LABEL: test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retl
ret <2 x double> zeroinitializer
@@ -13,7 +13,7 @@ define <2 x double> @test() {
; Prefer a constant pool load here.
define <4 x i32> @test2() nounwind {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [0,0,1,0]
; CHECK-NEXT: retl
ret <4 x i32> < i32 0, i32 0, i32 1, i32 0 >
Modified: llvm/trunk/test/CodeGen/X86/vec_sdiv_to_shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_sdiv_to_shift.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_sdiv_to_shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_sdiv_to_shift.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@
define <8 x i16> @sdiv_vec8x16(<8 x i16> %var) {
; SSE-LABEL: sdiv_vec8x16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psraw $15, %xmm1
; SSE-NEXT: psrlw $11, %xmm1
@@ -15,7 +15,7 @@ define <8 x i16> @sdiv_vec8x16(<8 x i16>
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_vec8x16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX-NEXT: vpsrlw $11, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
@@ -28,7 +28,7 @@ entry:
define <8 x i16> @sdiv_vec8x16_minsize(<8 x i16> %var) minsize {
; SSE-LABEL: sdiv_vec8x16_minsize:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psraw $15, %xmm1
; SSE-NEXT: psrlw $11, %xmm1
@@ -38,7 +38,7 @@ define <8 x i16> @sdiv_vec8x16_minsize(<
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_vec8x16_minsize:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX-NEXT: vpsrlw $11, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
@@ -51,7 +51,7 @@ entry:
define <4 x i32> @sdiv_vec4x32(<4 x i32> %var) {
; SSE-LABEL: sdiv_vec4x32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psrld $28, %xmm1
@@ -61,7 +61,7 @@ define <4 x i32> @sdiv_vec4x32(<4 x i32>
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_vec4x32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX-NEXT: vpsrld $28, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -74,7 +74,7 @@ ret <4 x i32> %0
define <4 x i32> @sdiv_negative(<4 x i32> %var) {
; SSE-LABEL: sdiv_negative:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psrld $28, %xmm1
@@ -85,7 +85,7 @@ define <4 x i32> @sdiv_negative(<4 x i32
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_negative:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX-NEXT: vpsrld $28, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
@@ -100,7 +100,7 @@ ret <4 x i32> %0
define <8 x i32> @sdiv8x32(<8 x i32> %var) {
; SSE-LABEL: sdiv8x32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: psrld $26, %xmm2
@@ -116,7 +116,7 @@ define <8 x i32> @sdiv8x32(<8 x i32> %va
; SSE-NEXT: retq
;
; AVX1-LABEL: sdiv8x32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $26, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm1
@@ -130,7 +130,7 @@ define <8 x i32> @sdiv8x32(<8 x i32> %va
; AVX1-NEXT: retq
;
; AVX2-LABEL: sdiv8x32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1
; AVX2-NEXT: vpsrld $26, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
@@ -143,7 +143,7 @@ ret <8 x i32> %0
define <16 x i16> @sdiv16x16(<16 x i16> %var) {
; SSE-LABEL: sdiv16x16:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psraw $15, %xmm2
; SSE-NEXT: psrlw $14, %xmm2
@@ -159,7 +159,7 @@ define <16 x i16> @sdiv16x16(<16 x i16>
; SSE-NEXT: retq
;
; AVX1-LABEL: sdiv16x16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX1-NEXT: vpsrlw $14, %xmm1, %xmm1
; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm1
@@ -173,7 +173,7 @@ define <16 x i16> @sdiv16x16(<16 x i16>
; AVX1-NEXT: retq
;
; AVX2-LABEL: sdiv16x16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsraw $15, %ymm0, %ymm1
; AVX2-NEXT: vpsrlw $14, %ymm1, %ymm1
; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
@@ -188,11 +188,11 @@ entry:
define <4 x i32> @sdiv_non_splat(<4 x i32> %x) {
; SSE-LABEL: sdiv_non_splat:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_non_splat:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: retq
%y = sdiv <4 x i32> %x, <i32 2, i32 0, i32 0, i32 0>
ret <4 x i32> %y
Modified: llvm/trunk/test/CodeGen/X86/vec_set-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-2.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <4 x float> @test1(float %a) nounwind {
; X86-LABEL: test1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: movaps %xmm1, %xmm0
@@ -23,12 +23,12 @@ define <4 x float> @test1(float %a) noun
define <2 x i64> @test(i32 %a) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: retq
%tmp = insertelement <4 x i32> zeroinitializer, i32 %a, i32 0
Modified: llvm/trunk/test/CodeGen/X86/vec_set-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-3.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <4 x float> @test(float %a) {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: insertps {{.*#+}} xmm0 = zero,mem[0],zero,zero
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
; X64-NEXT: retq
%tmp = insertelement <4 x float> zeroinitializer, float %a, i32 1
@@ -20,13 +20,13 @@ define <4 x float> @test(float %a) {
define <2 x i64> @test2(i32 %a) {
; X86-LABEL: test2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd %edi, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
; X64-NEXT: retq
@@ -38,12 +38,12 @@ define <2 x i64> @test2(i32 %a) {
define <4 x float> @test3(<4 x float> %A) {
; X86-LABEL: test3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
; X64-NEXT: retq
%tmp0 = extractelement <4 x float> %A, i32 0
Modified: llvm/trunk/test/CodeGen/X86/vec_set-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-4.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-4.ll Mon Dec 4 09:18:51 2017
@@ -4,13 +4,13 @@
define <2 x i64> @test(i16 %a) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pxor %xmm0, %xmm0
; X86-NEXT: pinsrw $3, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pxor %xmm0, %xmm0
; X64-NEXT: pinsrw $3, %edi, %xmm0
; X64-NEXT: retq
@@ -25,14 +25,14 @@ define <2 x i64> @test(i16 %a) nounwind
define <2 x i64> @test2(i8 %a) nounwind {
; X86-LABEL: test2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: pxor %xmm0, %xmm0
; X86-NEXT: pinsrw $5, %eax, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: pxor %xmm0, %xmm0
; X64-NEXT: pinsrw $5, %eax, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vec_set-6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-6.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-6.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-6.ll Mon Dec 4 09:18:51 2017
@@ -4,14 +4,14 @@
define <4 x float> @test(float %a, float %b, float %c) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,1]
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-NEXT: xorps %xmm2, %xmm2
; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
Modified: llvm/trunk/test/CodeGen/X86/vec_set-7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-7.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-7.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-7.ll Mon Dec 4 09:18:51 2017
@@ -4,13 +4,13 @@
define <2 x i64> @test(<2 x i64>* %p) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: retq
%tmp = bitcast <2 x i64>* %p to double*
Modified: llvm/trunk/test/CodeGen/X86/vec_set-8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-8.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-8.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <2 x i64> @test(i64 %i) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: retq
%tmp10 = insertelement <2 x i64> undef, i64 %i, i32 0
Modified: llvm/trunk/test/CodeGen/X86/vec_set-A.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-A.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-A.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-A.ll Mon Dec 4 09:18:51 2017
@@ -4,13 +4,13 @@
define <2 x i64> @test1() nounwind {
; X86-LABEL: test1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $1, %eax
; X86-NEXT: movd %eax, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $1, %eax
; X64-NEXT: movq %rax, %xmm0
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vec_set-B.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-B.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-B.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-B.ll Mon Dec 4 09:18:51 2017
@@ -11,14 +11,14 @@
define <2 x i64> @test3(i64 %arg) nounwind {
; X86-LABEL: test3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $1234567, %eax # imm = 0x12D687
; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movd %eax, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $1234567, %edi # imm = 0x12D687
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: retq
@@ -29,14 +29,14 @@ define <2 x i64> @test3(i64 %arg) nounwi
define <2 x i64> @test2(i64 %arg) nounwind {
; X86-LABEL: test2:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl $1234567, %eax # imm = 0x12D687
; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movd %eax, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: andl $1234567, %edi # imm = 0x12D687
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vec_set-C.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-C.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-C.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-C.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <2 x i64> @t1(i64 %x) nounwind {
; X86-LABEL: t1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: retq
%tmp8 = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
Modified: llvm/trunk/test/CodeGen/X86/vec_set-D.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-D.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-D.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-D.ll Mon Dec 4 09:18:51 2017
@@ -3,7 +3,7 @@
define <4 x i32> @t(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: t:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retl
%tmp1 = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
Modified: llvm/trunk/test/CodeGen/X86/vec_set-F.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-F.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-F.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-F.ll Mon Dec 4 09:18:51 2017
@@ -3,7 +3,7 @@
define <2 x i64> @t1(<2 x i64>* %ptr) nounwind {
; CHECK-LABEL: t1:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retl
@@ -16,7 +16,7 @@ define <2 x i64> @t1(<2 x i64>* %ptr) no
define <2 x i64> @t2(i64 %x) nounwind {
; CHECK-LABEL: t2:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retl
%tmp717 = bitcast i64 %x to double
Modified: llvm/trunk/test/CodeGen/X86/vec_set-H.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-H.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set-H.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set-H.ll Mon Dec 4 09:18:51 2017
@@ -3,7 +3,7 @@
define <2 x i64> @doload64(i16 signext %x) nounwind {
; CHECK-LABEL: doload64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
Modified: llvm/trunk/test/CodeGen/X86/vec_set.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_set.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_set.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define void @test(<8 x i16>* %b, i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
; X86-LABEL: test:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -25,7 +25,7 @@ define void @test(<8 x i16>* %b, i16 %a0
; X86-NEXT: retl
;
; X64-LABEL: test:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
Modified: llvm/trunk/test/CodeGen/X86/vec_setcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_setcc.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_setcc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_setcc.ll Mon Dec 4 09:18:51 2017
@@ -5,13 +5,13 @@
define <16 x i8> @v16i8_icmp_uge(<16 x i8> %a, <16 x i8> %b) nounwind readnone ssp uwtable {
; SSE-LABEL: v16i8_icmp_uge:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmaxub %xmm0, %xmm1
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: v16i8_icmp_uge:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -22,13 +22,13 @@ define <16 x i8> @v16i8_icmp_uge(<16 x i
define <16 x i8> @v16i8_icmp_ule(<16 x i8> %a, <16 x i8> %b) nounwind readnone ssp uwtable {
; SSE-LABEL: v16i8_icmp_ule:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pminub %xmm0, %xmm1
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: v16i8_icmp_ule:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -39,20 +39,20 @@ define <16 x i8> @v16i8_icmp_ule(<16 x i
define <8 x i16> @v8i16_icmp_uge(<8 x i16> %a, <8 x i16> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v8i16_icmp_uge:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psubusw %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: v8i16_icmp_uge:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: v8i16_icmp_uge:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -63,20 +63,20 @@ define <8 x i16> @v8i16_icmp_uge(<8 x i1
define <8 x i16> @v8i16_icmp_ule(<8 x i16> %a, <8 x i16> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v8i16_icmp_ule:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psubusw %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: v8i16_icmp_ule:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: v8i16_icmp_ule:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -87,7 +87,7 @@ define <8 x i16> @v8i16_icmp_ule(<8 x i1
define <4 x i32> @v4i32_icmp_uge(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v4i32_icmp_uge:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm2
@@ -97,13 +97,13 @@ define <4 x i32> @v4i32_icmp_uge(<4 x i3
; SSE2-NEXT: retq
;
; SSE41-LABEL: v4i32_icmp_uge:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmaxud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: v4i32_icmp_uge:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -114,7 +114,7 @@ define <4 x i32> @v4i32_icmp_uge(<4 x i3
define <4 x i32> @v4i32_icmp_ule(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v4i32_icmp_ule:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -124,13 +124,13 @@ define <4 x i32> @v4i32_icmp_ule(<4 x i3
; SSE2-NEXT: retq
;
; SSE41-LABEL: v4i32_icmp_ule:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pminud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: v4i32_icmp_ule:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -144,12 +144,12 @@ define <4 x i32> @v4i32_icmp_ule(<4 x i3
; should set all bits to 1.
define <16 x i8> @test_setcc_constfold_vi8(<16 x i8> %l, <16 x i8> %r) {
; SSE-LABEL: test_setcc_constfold_vi8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_setcc_constfold_vi8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%test1 = icmp eq <16 x i8> %l, %r
@@ -163,12 +163,12 @@ define <16 x i8> @test_setcc_constfold_v
; Make sure sensible results come from doing extension afterwards
define <16 x i8> @test_setcc_constfold_vi1(<16 x i8> %l, <16 x i8> %r) {
; SSE-LABEL: test_setcc_constfold_vi1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_setcc_constfold_vi1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%test1 = icmp eq <16 x i8> %l, %r
@@ -182,12 +182,12 @@ define <16 x i8> @test_setcc_constfold_v
; just 32-bits wide.
define <2 x i64> @test_setcc_constfold_vi64(<2 x i64> %l, <2 x i64> %r) {
; SSE-LABEL: test_setcc_constfold_vi64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_setcc_constfold_vi64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%test1 = icmp eq <2 x i64> %l, %r
Modified: llvm/trunk/test/CodeGen/X86/vec_shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shift.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shift.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <2 x i64> @t1(<2 x i64> %b1, <2 x i64> %c) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: psllw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psllw %xmm1, %xmm0
; X64-NEXT: retq
entry:
@@ -22,13 +22,13 @@ entry:
define <2 x i64> @t3(<2 x i64> %b1, i32 %c) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: psraw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %edi, %xmm1
; X64-NEXT: psraw %xmm1, %xmm0
; X64-NEXT: retq
@@ -45,12 +45,12 @@ declare <8 x i16> @llvm.x86.sse2.psra.w(
define <2 x i64> @t2(<2 x i64> %b1, <2 x i64> %c) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: psrlq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psrlq %xmm1, %xmm0
; X64-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/vec_shift2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shift2.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shift2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shift2.ll Mon Dec 4 09:18:51 2017
@@ -4,14 +4,14 @@
define <2 x i64> @t1(<2 x i64> %b1, <2 x i64> %c) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $14, %eax
; X32-NEXT: movd %eax, %xmm1
; X32-NEXT: psrlw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $14, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: psrlw %xmm1, %xmm0
@@ -24,14 +24,14 @@ define <2 x i64> @t1(<2 x i64> %b1, <2 x
define <4 x i32> @t2(<2 x i64> %b1, <2 x i64> %c) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $14, %eax
; X32-NEXT: movd %eax, %xmm1
; X32-NEXT: pslld %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $14, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: pslld %xmm1, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vec_shift3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shift3.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shift3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shift3.ll Mon Dec 4 09:18:51 2017
@@ -4,13 +4,13 @@
define <2 x i64> @t1(<2 x i64> %x1, i32 %bits) nounwind {
; X32-LABEL: t1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: psllq %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %edi, %xmm1
; X64-NEXT: psllq %xmm1, %xmm0
; X64-NEXT: retq
@@ -21,12 +21,12 @@ entry:
define <2 x i64> @t2(<2 x i64> %x1) nounwind {
; X32-LABEL: t2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: psllq $10, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: psllq $10, %xmm0
; X64-NEXT: retq
entry:
@@ -36,13 +36,13 @@ entry:
define <2 x i64> @t3(<2 x i64> %x1, i32 %bits) nounwind {
; X32-LABEL: t3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: psraw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movd %edi, %xmm1
; X64-NEXT: psraw %xmm1, %xmm0
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vec_shift4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shift4.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shift4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shift4.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define <2 x i64> @shl1(<4 x i32> %r, <4 x i32> %a) nounwind readnone ssp {
; X32-LABEL: shl1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: pslld $23, %xmm1
; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X32-NEXT: cvttps2dq %xmm1, %xmm1
@@ -12,7 +12,7 @@ define <2 x i64> @shl1(<4 x i32> %r, <4
; X32-NEXT: retl
;
; X64-LABEL: shl1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: pslld $23, %xmm1
; X64-NEXT: paddd {{.*}}(%rip), %xmm1
; X64-NEXT: cvttps2dq %xmm1, %xmm1
@@ -32,7 +32,7 @@ entry:
define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X32-LABEL: shl2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm2
; X32-NEXT: psllw $5, %xmm1
; X32-NEXT: movdqa %xmm2, %xmm3
@@ -55,7 +55,7 @@ define <2 x i64> @shl2(<16 x i8> %r, <16
; X32-NEXT: retl
;
; X64-LABEL: shl2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllw $5, %xmm1
; X64-NEXT: movdqa %xmm2, %xmm3
Modified: llvm/trunk/test/CodeGen/X86/vec_shift5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shift5.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shift5.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shift5.ll Mon Dec 4 09:18:51 2017
@@ -8,12 +8,12 @@
define <8 x i16> @test1() {
; X32-LABEL: test1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [8,16,32,64,8,16,32,64]
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [8,16,32,64,8,16,32,64]
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> <i16 1, i16 2, i16 4, i16 8, i16 1, i16 2, i16 4, i16 8>, i32 3)
@@ -22,12 +22,12 @@ define <8 x i16> @test1() {
define <8 x i16> @test2() {
; X32-LABEL: test2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4,0,1,2,4]
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4,0,1,2,4]
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> <i16 4, i16 8, i16 16, i16 32, i16 4, i16 8, i16 16, i16 32>, i32 3)
@@ -36,12 +36,12 @@ define <8 x i16> @test2() {
define <8 x i16> @test3() {
; X32-LABEL: test3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4,0,1,2,4]
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4,0,1,2,4]
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 4, i16 8, i16 16, i16 32, i16 4, i16 8, i16 16, i16 32>, i32 3)
@@ -50,12 +50,12 @@ define <8 x i16> @test3() {
define <4 x i32> @test4() {
; X32-LABEL: test4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [8,16,32,64]
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [8,16,32,64]
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> <i32 1, i32 2, i32 4, i32 8>, i32 3)
@@ -64,12 +64,12 @@ define <4 x i32> @test4() {
define <4 x i32> @test5() {
; X32-LABEL: test5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4]
; X32-NEXT: retl
;
; X64-LABEL: test5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4]
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> <i32 4, i32 8, i32 16, i32 32>, i32 3)
@@ -78,12 +78,12 @@ define <4 x i32> @test5() {
define <4 x i32> @test6() {
; X32-LABEL: test6:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4]
; X32-NEXT: retl
;
; X64-LABEL: test6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [0,1,2,4]
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> <i32 4, i32 8, i32 16, i32 32>, i32 3)
@@ -92,12 +92,12 @@ define <4 x i32> @test6() {
define <2 x i64> @test7() {
; X32-LABEL: test7:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0]
; X32-NEXT: retl
;
; X64-LABEL: test7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [8,16]
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> <i64 1, i64 2>, i32 3)
@@ -106,12 +106,12 @@ define <2 x i64> @test7() {
define <2 x i64> @test8() {
; X32-LABEL: test8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = [1,0,2,0]
; X32-NEXT: retl
;
; X64-LABEL: test8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = [1,2]
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> <i64 8, i64 16>, i32 3)
@@ -120,12 +120,12 @@ define <2 x i64> @test8() {
define <8 x i16> @test9() {
; X32-LABEL: test9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X32-NEXT: retl
;
; X64-LABEL: test9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
@@ -134,12 +134,12 @@ define <8 x i16> @test9() {
define <4 x i32> @test10() {
; X32-LABEL: test10:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
; X32-NEXT: retl
;
; X64-LABEL: test10:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
@@ -148,12 +148,12 @@ define <4 x i32> @test10() {
define <2 x i64> @test11() {
; X32-LABEL: test11:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <u,u,3,0>
; X32-NEXT: retl
;
; X64-LABEL: test11:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,3>
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> <i64 undef, i64 31>, i32 3)
@@ -162,12 +162,12 @@ define <2 x i64> @test11() {
define <8 x i16> @test12() {
; X32-LABEL: test12:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X32-NEXT: retl
;
; X64-LABEL: test12:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
@@ -176,12 +176,12 @@ define <8 x i16> @test12() {
define <4 x i32> @test13() {
; X32-LABEL: test13:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
; X32-NEXT: retl
;
; X64-LABEL: test13:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
@@ -190,12 +190,12 @@ define <4 x i32> @test13() {
define <8 x i16> @test14() {
; X32-LABEL: test14:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X32-NEXT: retl
;
; X64-LABEL: test14:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
@@ -204,12 +204,12 @@ define <8 x i16> @test14() {
define <4 x i32> @test15() {
; X32-LABEL: test15:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <u,64,u,256>
; X32-NEXT: retl
;
; X64-LABEL: test15:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,64,u,256>
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
@@ -218,12 +218,12 @@ define <4 x i32> @test15() {
define <2 x i64> @test16() {
; X32-LABEL: test16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movaps {{.*#+}} xmm0 = <u,u,248,0>
; X32-NEXT: retl
;
; X64-LABEL: test16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movaps {{.*#+}} xmm0 = <u,248>
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> <i64 undef, i64 31>, i32 3)
Modified: llvm/trunk/test/CodeGen/X86/vec_shift6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shift6.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shift6.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shift6.ll Mon Dec 4 09:18:51 2017
@@ -9,17 +9,17 @@
define <8 x i16> @test1(<8 x i16> %a) {
; SSE-LABEL: test1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: test1:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%shl = shl <8 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
@@ -28,17 +28,17 @@ define <8 x i16> @test1(<8 x i16> %a) {
define <8 x i16> @test2(<8 x i16> %a) {
; SSE-LABEL: test2:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: test2:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test2:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%shl = shl <8 x i16> %a, <i16 0, i16 undef, i16 0, i16 0, i16 1, i16 undef, i16 -1, i16 1>
@@ -51,17 +51,17 @@ define <8 x i16> @test2(<8 x i16> %a) {
define <4 x i32> @test3(<4 x i32> %a) {
; SSE-LABEL: test3:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: test3:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test3:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%shl = shl <4 x i32> %a, <i32 1, i32 -1, i32 2, i32 -3>
@@ -70,17 +70,17 @@ define <4 x i32> @test3(<4 x i32> %a) {
define <4 x i32> @test4(<4 x i32> %a) {
; SSE-LABEL: test4:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: test4:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test4:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%shl = shl <4 x i32> %a, <i32 0, i32 0, i32 1, i32 1>
@@ -93,19 +93,19 @@ define <4 x i32> @test4(<4 x i32> %a) {
define <16 x i16> @test5(<16 x i16> %a) {
; SSE-LABEL: test5:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2,2,4,8,128,1,512,2048]
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX2-LABEL: test5:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test5:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
%shl = shl <16 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
@@ -118,19 +118,19 @@ define <16 x i16> @test5(<16 x i16> %a)
define <8 x i32> @test6(<8 x i32> %a) {
; SSE-LABEL: test6:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2,2,4,8]
; SSE-NEXT: pmulld %xmm2, %xmm0
; SSE-NEXT: pmulld %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX2-LABEL: test6:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test6:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
%shl = shl <8 x i32> %a, <i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3>
@@ -143,7 +143,7 @@ define <8 x i32> @test6(<8 x i32> %a) {
define <32 x i16> @test7(<32 x i16> %a) {
; SSE-LABEL: test7:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [2,2,4,8,128,1,512,2048]
; SSE-NEXT: pmullw %xmm4, %xmm0
; SSE-NEXT: pmullw %xmm4, %xmm1
@@ -152,7 +152,7 @@ define <32 x i16> @test7(<32 x i16> %a)
; SSE-NEXT: retq
;
; AVX2-LABEL: test7:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [2,2,4,8,128,1,512,2048,2,2,4,8,128,1,512,2048]
; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
@@ -160,7 +160,7 @@ define <32 x i16> @test7(<32 x i16> %a)
; AVX2-NEXT: retq
;
; AVX512-LABEL: test7:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [2,2,4,8,128,1,512,2048,2,2,4,8,128,1,512,2048]
; AVX512-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512-NEXT: vpmullw %ymm2, %ymm0, %ymm0
@@ -175,7 +175,7 @@ define <32 x i16> @test7(<32 x i16> %a)
define <16 x i32> @test8(<16 x i32> %a) {
; SSE-LABEL: test8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [2,2,4,8]
; SSE-NEXT: pmulld %xmm4, %xmm0
; SSE-NEXT: pmulld %xmm4, %xmm1
@@ -184,7 +184,7 @@ define <16 x i32> @test8(<16 x i32> %a)
; SSE-NEXT: retq
;
; AVX2-LABEL: test8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,1,2,3,1,1,2,3]
; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
@@ -192,7 +192,7 @@ define <16 x i32> @test8(<16 x i32> %a)
; AVX2-NEXT: retq
;
; AVX512-LABEL: test8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <16 x i32> %a, <i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3>
@@ -203,7 +203,7 @@ define <16 x i32> @test8(<16 x i32> %a)
define <8 x i64> @test9(<8 x i64> %a) {
; SSE-LABEL: test9:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psllq $3, %xmm4
; SSE-NEXT: psllq $2, %xmm1
@@ -217,14 +217,14 @@ define <8 x i64> @test9(<8 x i64> %a) {
; SSE-NEXT: retq
;
; AVX2-LABEL: test9:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,2,3]
; AVX2-NEXT: vpsllvq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsllvq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test9:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvq {{.*}}(%rip), %zmm0, %zmm0
; AVX512-NEXT: retq
%shl = shl <8 x i64> %a, <i64 1, i64 1, i64 2, i64 3, i64 1, i64 1, i64 2, i64 3>
Modified: llvm/trunk/test/CodeGen/X86/vec_shift7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shift7.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shift7.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shift7.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define i64 @test1(<2 x i64> %a) {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: movdqa %xmm0, %xmm1
; X32-NEXT: psllq $2, %xmm1
; X32-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
@@ -16,7 +16,7 @@ define i64 @test1(<2 x i64> %a) {
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: movq %xmm0, %rax
; X64-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll Mon Dec 4 09:18:51 2017
@@ -8,7 +8,7 @@
define i16 @test1(float %f) nounwind {
; X32-LABEL: test1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: addss LCPI0_0, %xmm0
; X32-NEXT: mulss LCPI0_1, %xmm0
@@ -21,7 +21,7 @@ define i16 @test1(float %f) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: addss {{.*}}(%rip), %xmm0
; X64-NEXT: mulss {{.*}}(%rip), %xmm0
; X64-NEXT: xorps %xmm1, %xmm1
@@ -33,7 +33,7 @@ define i16 @test1(float %f) nounwind {
; X64-NEXT: retq
;
; X32_AVX1-LABEL: test1:
-; X32_AVX1: ## BB#0:
+; X32_AVX1: ## %bb.0:
; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32_AVX1-NEXT: vaddss LCPI0_0, %xmm0, %xmm0
; X32_AVX1-NEXT: vmulss LCPI0_1, %xmm0, %xmm0
@@ -46,7 +46,7 @@ define i16 @test1(float %f) nounwind {
; X32_AVX1-NEXT: retl
;
; X64_AVX1-LABEL: test1:
-; X64_AVX1: ## BB#0:
+; X64_AVX1: ## %bb.0:
; X64_AVX1-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX1-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -58,7 +58,7 @@ define i16 @test1(float %f) nounwind {
; X64_AVX1-NEXT: retq
;
; X32_AVX512-LABEL: test1:
-; X32_AVX512: ## BB#0:
+; X32_AVX512: ## %bb.0:
; X32_AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32_AVX512-NEXT: vaddss LCPI0_0, %xmm0, %xmm0
; X32_AVX512-NEXT: vmulss LCPI0_1, %xmm0, %xmm0
@@ -71,7 +71,7 @@ define i16 @test1(float %f) nounwind {
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: test1:
-; X64_AVX512: ## BB#0:
+; X64_AVX512: ## %bb.0:
; X64_AVX512-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -96,7 +96,7 @@ define i16 @test1(float %f) nounwind {
define i16 @test2(float %f) nounwind {
; X32-LABEL: test2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: addss LCPI1_0, %xmm0
; X32-NEXT: mulss LCPI1_1, %xmm0
@@ -108,7 +108,7 @@ define i16 @test2(float %f) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: addss {{.*}}(%rip), %xmm0
; X64-NEXT: mulss {{.*}}(%rip), %xmm0
; X64-NEXT: minss {{.*}}(%rip), %xmm0
@@ -119,7 +119,7 @@ define i16 @test2(float %f) nounwind {
; X64-NEXT: retq
;
; X32_AVX-LABEL: test2:
-; X32_AVX: ## BB#0:
+; X32_AVX: ## %bb.0:
; X32_AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32_AVX-NEXT: vaddss LCPI1_0, %xmm0, %xmm0
; X32_AVX-NEXT: vmulss LCPI1_1, %xmm0, %xmm0
@@ -131,7 +131,7 @@ define i16 @test2(float %f) nounwind {
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: test2:
-; X64_AVX: ## BB#0:
+; X64_AVX: ## %bb.0:
; X64_AVX-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
@@ -166,35 +166,35 @@ declare <4 x float> @f()
define <4 x float> @test3(<4 x float> %A, float *%b, i32 %C) nounwind {
; X32-LABEL: test3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: roundss $4, (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: roundss $4, (%rdi), %xmm0
; X64-NEXT: retq
;
; X32_AVX1-LABEL: test3:
-; X32_AVX1: ## BB#0:
+; X32_AVX1: ## %bb.0:
; X32_AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX1-NEXT: vroundss $4, (%eax), %xmm0, %xmm0
; X32_AVX1-NEXT: retl
;
; X64_AVX1-LABEL: test3:
-; X64_AVX1: ## BB#0:
+; X64_AVX1: ## %bb.0:
; X64_AVX1-NEXT: vroundss $4, (%rdi), %xmm0, %xmm0
; X64_AVX1-NEXT: retq
;
; X32_AVX512-LABEL: test3:
-; X32_AVX512: ## BB#0:
+; X32_AVX512: ## %bb.0:
; X32_AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX512-NEXT: vrndscaless $4, (%eax), %xmm0, %xmm0
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: test3:
-; X64_AVX512: ## BB#0:
+; X64_AVX512: ## %bb.0:
; X64_AVX512-NEXT: vrndscaless $4, (%rdi), %xmm0, %xmm0
; X64_AVX512-NEXT: retq
%a = load float , float *%b
@@ -205,7 +205,7 @@ define <4 x float> @test3(<4 x float> %A
define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
; X32-LABEL: test4:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: subl $28, %esp
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -216,7 +216,7 @@ define <4 x float> @test4(<4 x float> %A
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
@@ -226,7 +226,7 @@ define <4 x float> @test4(<4 x float> %A
; X64-NEXT: retq
;
; X32_AVX1-LABEL: test4:
-; X32_AVX1: ## BB#0:
+; X32_AVX1: ## %bb.0:
; X32_AVX1-NEXT: subl $28, %esp
; X32_AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -237,7 +237,7 @@ define <4 x float> @test4(<4 x float> %A
; X32_AVX1-NEXT: retl
;
; X64_AVX1-LABEL: test4:
-; X64_AVX1: ## BB#0:
+; X64_AVX1: ## %bb.0:
; X64_AVX1-NEXT: subq $24, %rsp
; X64_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64_AVX1-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
@@ -247,7 +247,7 @@ define <4 x float> @test4(<4 x float> %A
; X64_AVX1-NEXT: retq
;
; X32_AVX512-LABEL: test4:
-; X32_AVX512: ## BB#0:
+; X32_AVX512: ## %bb.0:
; X32_AVX512-NEXT: subl $28, %esp
; X32_AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -259,7 +259,7 @@ define <4 x float> @test4(<4 x float> %A
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: test4:
-; X64_AVX512: ## BB#0:
+; X64_AVX512: ## %bb.0:
; X64_AVX512-NEXT: subq $24, %rsp
; X64_AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64_AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
@@ -278,28 +278,28 @@ define <4 x float> @test4(<4 x float> %A
; PR13576
define <2 x double> @test5() nounwind uwtable readnone noinline {
; X32-LABEL: test5:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
; X32-NEXT: movl $128, %eax
; X32-NEXT: cvtsi2sdl %eax, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test5:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
; X64-NEXT: movl $128, %eax
; X64-NEXT: cvtsi2sdl %eax, %xmm0
; X64-NEXT: retq
;
; X32_AVX-LABEL: test5:
-; X32_AVX: ## BB#0: ## %entry
+; X32_AVX: ## %bb.0: ## %entry
; X32_AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
; X32_AVX-NEXT: movl $128, %eax
; X32_AVX-NEXT: vcvtsi2sdl %eax, %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: test5:
-; X64_AVX: ## BB#0: ## %entry
+; X64_AVX: ## %bb.0: ## %entry
; X64_AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4.569870e+02,1.233210e+02]
; X64_AVX-NEXT: movl $128, %eax
; X64_AVX-NEXT: vcvtsi2sdl %eax, %xmm0, %xmm0
@@ -313,24 +313,24 @@ declare <2 x double> @llvm.x86.sse2.cvts
define <4 x float> @minss_fold(float* %x, <4 x float> %y) {
; X32-LABEL: minss_fold:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: minss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: minss_fold:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: minss (%rdi), %xmm0
; X64-NEXT: retq
;
; X32_AVX-LABEL: minss_fold:
-; X32_AVX: ## BB#0: ## %entry
+; X32_AVX: ## %bb.0: ## %entry
; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX-NEXT: vminss (%eax), %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: minss_fold:
-; X64_AVX: ## BB#0: ## %entry
+; X64_AVX: ## %bb.0: ## %entry
; X64_AVX-NEXT: vminss (%rdi), %xmm0, %xmm0
; X64_AVX-NEXT: retq
entry:
@@ -345,24 +345,24 @@ entry:
define <4 x float> @maxss_fold(float* %x, <4 x float> %y) {
; X32-LABEL: maxss_fold:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: maxss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: maxss_fold:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: maxss (%rdi), %xmm0
; X64-NEXT: retq
;
; X32_AVX-LABEL: maxss_fold:
-; X32_AVX: ## BB#0: ## %entry
+; X32_AVX: ## %bb.0: ## %entry
; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX-NEXT: vmaxss (%eax), %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: maxss_fold:
-; X64_AVX: ## BB#0: ## %entry
+; X64_AVX: ## %bb.0: ## %entry
; X64_AVX-NEXT: vmaxss (%rdi), %xmm0, %xmm0
; X64_AVX-NEXT: retq
entry:
@@ -377,24 +377,24 @@ entry:
define <4 x float> @cmpss_fold(float* %x, <4 x float> %y) {
; X32-LABEL: cmpss_fold:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: cmpeqss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: cmpss_fold:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: cmpeqss (%rdi), %xmm0
; X64-NEXT: retq
;
; X32_AVX-LABEL: cmpss_fold:
-; X32_AVX: ## BB#0: ## %entry
+; X32_AVX: ## %bb.0: ## %entry
; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX-NEXT: vcmpeqss (%eax), %xmm0, %xmm0
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: cmpss_fold:
-; X64_AVX: ## BB#0: ## %entry
+; X64_AVX: ## %bb.0: ## %entry
; X64_AVX-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0
; X64_AVX-NEXT: retq
entry:
@@ -411,7 +411,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ss
define <4 x float> @double_fold(float* %x, <4 x float> %y) {
; X32-LABEL: double_fold:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movaps %xmm0, %xmm2
@@ -421,7 +421,7 @@ define <4 x float> @double_fold(float* %
; X32-NEXT: retl
;
; X64-LABEL: double_fold:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: movaps %xmm0, %xmm2
; X64-NEXT: minss %xmm1, %xmm2
@@ -430,7 +430,7 @@ define <4 x float> @double_fold(float* %
; X64-NEXT: retq
;
; X32_AVX-LABEL: double_fold:
-; X32_AVX: ## BB#0: ## %entry
+; X32_AVX: ## %bb.0: ## %entry
; X32_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32_AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32_AVX-NEXT: vminss %xmm1, %xmm0, %xmm2
@@ -439,7 +439,7 @@ define <4 x float> @double_fold(float* %
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: double_fold:
-; X64_AVX: ## BB#0: ## %entry
+; X64_AVX: ## %bb.0: ## %entry
; X64_AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64_AVX-NEXT: vminss %xmm1, %xmm0, %xmm2
; X64_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vec_trunc_sext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_trunc_sext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_trunc_sext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_trunc_sext.ll Mon Dec 4 09:18:51 2017
@@ -10,7 +10,7 @@
define <4 x i32> @trunc_sext(<4 x i16>* %in) {
; NO_SSE_41-LABEL: trunc_sext:
-; NO_SSE_41: # BB#0:
+; NO_SSE_41: # %bb.0:
; NO_SSE_41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; NO_SSE_41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; NO_SSE_41-NEXT: pslld $24, %xmm0
@@ -18,7 +18,7 @@ define <4 x i32> @trunc_sext(<4 x i16>*
; NO_SSE_41-NEXT: retq
;
; SSE_41-LABEL: trunc_sext:
-; SSE_41: # BB#0:
+; SSE_41: # %bb.0:
; SSE_41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE_41-NEXT: pslld $24, %xmm0
; SSE_41-NEXT: psrad $24, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll Mon Dec 4 09:18:51 2017
@@ -28,7 +28,7 @@
define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; SSE2-LABEL: test_uitofp_v4i32_to_v4f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE2-NEXT: andps %xmm0, %xmm1
; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
@@ -39,7 +39,7 @@ define <4 x float> @test_uitofp_v4i32_to
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_uitofp_v4i32_to_v4f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
@@ -50,7 +50,7 @@ define <4 x float> @test_uitofp_v4i32_to
; SSE41-NEXT: retq
;
; AVX-LABEL: test_uitofp_v4i32_to_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: vcvtdq2ps %xmm1, %xmm1
@@ -61,7 +61,7 @@ define <4 x float> @test_uitofp_v4i32_to
; AVX-NEXT: retq
;
; AVX2-LABEL: test_uitofp_v4i32_to_v4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vcvtdq2ps %xmm1, %xmm1
; AVX2-NEXT: vbroadcastss [[FPMASKCSTADDR]](%rip), %xmm2
@@ -73,7 +73,7 @@ define <4 x float> @test_uitofp_v4i32_to
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_uitofp_v4i32_to_v4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill
@@ -81,7 +81,7 @@ define <4 x float> @test_uitofp_v4i32_to
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_uitofp_v4i32_to_v4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps %xmm0, %xmm0
; AVX512VL-NEXT: retq
%tmp = uitofp <4 x i32> %arg to <4 x float>
@@ -105,7 +105,7 @@ define <4 x float> @test_uitofp_v4i32_to
define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
; SSE2-LABEL: test_uitofp_v8i32_to_v8f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrld $16, %xmm2
; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
@@ -125,7 +125,7 @@ define <8 x float> @test_uitofp_v8i32_to
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_uitofp_v8i32_to_v8f32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrld $16, %xmm2
; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
@@ -145,7 +145,7 @@ define <8 x float> @test_uitofp_v8i32_to
; SSE41-NEXT: retq
;
; AVX-LABEL: test_uitofp_v8i32_to_v8f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX-NEXT: vpsrld $16, %xmm2, %xmm2
@@ -158,7 +158,7 @@ define <8 x float> @test_uitofp_v8i32_to
; AVX-NEXT: retq
;
; AVX2-LABEL: test_uitofp_v8i32_to_v8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm1
; AVX2-NEXT: vcvtdq2ps %ymm1, %ymm1
; AVX2-NEXT: vbroadcastss [[FPMASKCSTADDR_v8]](%rip), %ymm2
@@ -170,14 +170,14 @@ define <8 x float> @test_uitofp_v8i32_to
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_uitofp_v8i32_to_v8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_uitofp_v8i32_to_v8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps %ymm0, %ymm0
; AVX512VL-NEXT: retq
%tmp = uitofp <8 x i32> %arg to <8 x float>
Modified: llvm/trunk/test/CodeGen/X86/vec_unsafe-fp-math.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_unsafe-fp-math.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_unsafe-fp-math.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_unsafe-fp-math.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
; Subtracting zero is free.
define <4 x float> @vec_fsub_zero(<4 x float> %x) {
; CHECK-LABEL: vec_fsub_zero:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: retq
%sub = fsub <4 x float> %x, zeroinitializer
ret <4 x float> %sub
@@ -15,7 +15,7 @@ define <4 x float> @vec_fsub_zero(<4 x f
; Negating doesn't require subtraction.
define <4 x float> @vec_fneg(<4 x float> %x) {
; CHECK-LABEL: vec_fneg:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
%sub = fsub <4 x float> zeroinitializer, %x
Modified: llvm/trunk/test/CodeGen/X86/vec_zero_cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_zero_cse.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_zero_cse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_zero_cse.ll Mon Dec 4 09:18:51 2017
@@ -12,7 +12,7 @@
define void @test1() {
; X32-LABEL: test1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $0, M1+4
; X32-NEXT: movl $0, M1
; X32-NEXT: xorps %xmm0, %xmm0
@@ -20,7 +20,7 @@ define void @test1() {
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq $0, {{.*}}(%rip)
; X64-NEXT: movq $0, {{.*}}(%rip)
; X64-NEXT: retq
@@ -31,7 +31,7 @@ define void @test1() {
define void @test2() {
; X32-LABEL: test2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $-1, M1+4
; X32-NEXT: movl $-1, M1
; X32-NEXT: pcmpeqd %xmm0, %xmm0
@@ -39,7 +39,7 @@ define void @test2() {
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movq $-1, {{.*}}(%rip)
; X64-NEXT: movq {{.*}}(%rip), %rax
; X64-NEXT: movq %rax, {{.*}}(%rip)
@@ -51,14 +51,14 @@ define void @test2() {
define void @test3() {
; X32-LABEL: test3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: xorps %xmm0, %xmm0
; X32-NEXT: movaps %xmm0, S1
; X32-NEXT: movaps %xmm0, S2
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
@@ -70,14 +70,14 @@ define void @test3() {
define void @test4() {
; X32-LABEL: test4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pcmpeqd %xmm0, %xmm0
; X32-NEXT: movdqa %xmm0, S1
; X32-NEXT: movdqa %xmm0, S2
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pcmpeqd %xmm0, %xmm0
; X64-NEXT: movdqa %xmm0, {{.*}}(%rip)
; X64-NEXT: movdqa %xmm0, {{.*}}(%rip)
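
The vec_zero_cse.ll tests check the two load-free register idioms and
that each is materialized once and reused across stores: XORing a
register with itself yields all-zero bits, and pcmpeqd of a register
with itself yields all-one bits. An SSE2 intrinsics sketch of the idioms
(an illustration, not from this commit):

    #include <emmintrin.h>

    __m128i all_zeros(void) { return _mm_setzero_si128(); } /* xorps/pxor */
    __m128i all_ones(void) {
        __m128i z = _mm_setzero_si128();
        return _mm_cmpeq_epi32(z, z);                       /* pcmpeqd */
    }
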
Modified: llvm/trunk/test/CodeGen/X86/vector-bitreverse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-bitreverse.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-bitreverse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-bitreverse.ll Mon Dec 4 09:18:51 2017
@@ -10,7 +10,7 @@
define i8 @test_bitreverse_i8(i8 %a) nounwind {
; SSE-LABEL: test_bitreverse_i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: rolb $4, %dil
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andb $51, %al
@@ -28,7 +28,7 @@ define i8 @test_bitreverse_i8(i8 %a) nou
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: rolb $4, %dil
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andb $51, %al
@@ -46,7 +46,7 @@ define i8 @test_bitreverse_i8(i8 %a) nou
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vpextrb $0, %xmm0, %eax
@@ -58,7 +58,7 @@ define i8 @test_bitreverse_i8(i8 %a) nou
define i16 @test_bitreverse_i16(i16 %a) nounwind {
; SSE-LABEL: test_bitreverse_i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE-NEXT: rolw $8, %di
; SSE-NEXT: movl %edi, %eax
@@ -81,7 +81,7 @@ define i16 @test_bitreverse_i16(i16 %a)
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: rolw $8, %di
; AVX-NEXT: movl %edi, %eax
@@ -104,7 +104,7 @@ define i16 @test_bitreverse_i16(i16 %a)
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
@@ -116,7 +116,7 @@ define i16 @test_bitreverse_i16(i16 %a)
define i32 @test_bitreverse_i32(i32 %a) nounwind {
; SSE-LABEL: test_bitreverse_i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE-NEXT: bswapl %edi
; SSE-NEXT: movl %edi, %eax
@@ -138,7 +138,7 @@ define i32 @test_bitreverse_i32(i32 %a)
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: bswapl %edi
; AVX-NEXT: movl %edi, %eax
@@ -160,7 +160,7 @@ define i32 @test_bitreverse_i32(i32 %a)
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
@@ -171,7 +171,7 @@ define i32 @test_bitreverse_i32(i32 %a)
define i64 @test_bitreverse_i64(i64 %a) nounwind {
; SSE-LABEL: test_bitreverse_i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: bswapq %rdi
; SSE-NEXT: movabsq $1085102592571150095, %rax # imm = 0xF0F0F0F0F0F0F0F
; SSE-NEXT: andq %rdi, %rax
@@ -195,7 +195,7 @@ define i64 @test_bitreverse_i64(i64 %a)
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: bswapq %rdi
; AVX-NEXT: movabsq $1085102592571150095, %rax # imm = 0xF0F0F0F0F0F0F0F
; AVX-NEXT: andq %rdi, %rax
@@ -219,7 +219,7 @@ define i64 @test_bitreverse_i64(i64 %a)
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovq %rdi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovq %xmm0, %rax
@@ -230,7 +230,7 @@ define i64 @test_bitreverse_i64(i64 %a)
define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
@@ -259,7 +259,7 @@ define <16 x i8> @test_bitreverse_v16i8(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm1, %xmm2
@@ -274,7 +274,7 @@ define <16 x i8> @test_bitreverse_v16i8(
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -287,7 +287,7 @@ define <16 x i8> @test_bitreverse_v16i8(
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a)
@@ -296,7 +296,7 @@ define <16 x i8> @test_bitreverse_v16i8(
define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -334,7 +334,7 @@ define <8 x i16> @test_bitreverse_v8i16(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
@@ -350,7 +350,7 @@ define <8 x i16> @test_bitreverse_v8i16(
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
@@ -364,7 +364,7 @@ define <8 x i16> @test_bitreverse_v8i16(
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v8i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %a)
@@ -373,7 +373,7 @@ define <8 x i16> @test_bitreverse_v8i16(
define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -411,7 +411,7 @@ define <4 x i32> @test_bitreverse_v4i32(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
@@ -427,7 +427,7 @@ define <4 x i32> @test_bitreverse_v4i32(
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
@@ -441,7 +441,7 @@ define <4 x i32> @test_bitreverse_v4i32(
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v4i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a)
@@ -450,7 +450,7 @@ define <4 x i32> @test_bitreverse_v4i32(
define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -490,7 +490,7 @@ define <2 x i64> @test_bitreverse_v2i64(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v2i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
@@ -506,7 +506,7 @@ define <2 x i64> @test_bitreverse_v2i64(
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
@@ -520,7 +520,7 @@ define <2 x i64> @test_bitreverse_v2i64(
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v2i64:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %a)
@@ -529,7 +529,7 @@ define <2 x i64> @test_bitreverse_v2i64(
define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pand %xmm2, %xmm3
@@ -586,7 +586,7 @@ define <32 x i8> @test_bitreverse_v32i8(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v32i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm4, %xmm2
@@ -611,7 +611,7 @@ define <32 x i8> @test_bitreverse_v32i8(
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -632,7 +632,7 @@ define <32 x i8> @test_bitreverse_v32i8(
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -645,7 +645,7 @@ define <32 x i8> @test_bitreverse_v32i8(
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -658,7 +658,7 @@ define <32 x i8> @test_bitreverse_v32i8(
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v32i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -667,7 +667,7 @@ define <32 x i8> @test_bitreverse_v32i8(
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v32i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -680,7 +680,7 @@ define <32 x i8> @test_bitreverse_v32i8(
define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
@@ -754,7 +754,7 @@ define <16 x i16> @test_bitreverse_v16i1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v16i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -782,7 +782,7 @@ define <16 x i16> @test_bitreverse_v16i1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -806,7 +806,7 @@ define <16 x i16> @test_bitreverse_v16i1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -820,7 +820,7 @@ define <16 x i16> @test_bitreverse_v16i1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -834,7 +834,7 @@ define <16 x i16> @test_bitreverse_v16i1
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v16i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -843,7 +843,7 @@ define <16 x i16> @test_bitreverse_v16i1
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v16i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -856,7 +856,7 @@ define <16 x i16> @test_bitreverse_v16i1
define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
@@ -930,7 +930,7 @@ define <8 x i32> @test_bitreverse_v8i32(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v8i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -958,7 +958,7 @@ define <8 x i32> @test_bitreverse_v8i32(
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -982,7 +982,7 @@ define <8 x i32> @test_bitreverse_v8i32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -996,7 +996,7 @@ define <8 x i32> @test_bitreverse_v8i32(
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -1010,7 +1010,7 @@ define <8 x i32> @test_bitreverse_v8i32(
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v8i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -1019,7 +1019,7 @@ define <8 x i32> @test_bitreverse_v8i32(
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v8i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -1032,7 +1032,7 @@ define <8 x i32> @test_bitreverse_v8i32(
define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
@@ -1110,7 +1110,7 @@ define <4 x i64> @test_bitreverse_v4i64(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v4i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -1138,7 +1138,7 @@ define <4 x i64> @test_bitreverse_v4i64(
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -1162,7 +1162,7 @@ define <4 x i64> @test_bitreverse_v4i64(
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -1176,7 +1176,7 @@ define <4 x i64> @test_bitreverse_v4i64(
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
@@ -1190,7 +1190,7 @@ define <4 x i64> @test_bitreverse_v4i64(
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v4i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -1199,7 +1199,7 @@ define <4 x i64> @test_bitreverse_v4i64(
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v4i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
@@ -1212,7 +1212,7 @@ define <4 x i64> @test_bitreverse_v4i64(
define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v64i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm13, %xmm5
@@ -1315,7 +1315,7 @@ define <64 x i8> @test_bitreverse_v64i8(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v64i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm0, %xmm5
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: pand %xmm8, %xmm0
@@ -1359,7 +1359,7 @@ define <64 x i8> @test_bitreverse_v64i8(
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
@@ -1394,7 +1394,7 @@ define <64 x i8> @test_bitreverse_v64i8(
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -1413,7 +1413,7 @@ define <64 x i8> @test_bitreverse_v64i8(
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -1432,7 +1432,7 @@ define <64 x i8> @test_bitreverse_v64i8(
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
@@ -1445,7 +1445,7 @@ define <64 x i8> @test_bitreverse_v64i8(
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v64i8:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -1458,7 +1458,7 @@ define <64 x i8> @test_bitreverse_v64i8(
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v64i8:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -1475,7 +1475,7 @@ define <64 x i8> @test_bitreverse_v64i8(
define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v32i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
@@ -1611,7 +1611,7 @@ define <32 x i16> @test_bitreverse_v32i1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v32i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
@@ -1661,7 +1661,7 @@ define <32 x i16> @test_bitreverse_v32i1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -1701,7 +1701,7 @@ define <32 x i16> @test_bitreverse_v32i1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -1723,7 +1723,7 @@ define <32 x i16> @test_bitreverse_v32i1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX512F-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -1745,7 +1745,7 @@ define <32 x i16> @test_bitreverse_v32i1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30,33,32,35,34,37,36,39,38,41,40,43,42,45,44,47,46,49,48,51,50,53,52,55,54,57,56,59,58,61,60,63,62]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
@@ -1759,7 +1759,7 @@ define <32 x i16> @test_bitreverse_v32i1
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v32i16:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -1772,7 +1772,7 @@ define <32 x i16> @test_bitreverse_v32i1
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v32i16:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -1789,7 +1789,7 @@ define <32 x i16> @test_bitreverse_v32i1
define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
@@ -1925,7 +1925,7 @@ define <16 x i32> @test_bitreverse_v16i3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v16i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
@@ -1975,7 +1975,7 @@ define <16 x i32> @test_bitreverse_v16i3
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -2015,7 +2015,7 @@ define <16 x i32> @test_bitreverse_v16i3
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -2037,7 +2037,7 @@ define <16 x i32> @test_bitreverse_v16i3
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v16i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrld $24, %zmm0, %zmm1
; AVX512F-NEXT: vpsrld $8, %zmm0, %zmm2
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm2, %zmm2
@@ -2065,7 +2065,7 @@ define <16 x i32> @test_bitreverse_v16i3
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v16i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28,35,34,33,32,39,38,37,36,43,42,41,40,47,46,45,44,51,50,49,48,55,54,53,52,59,58,57,56,63,62,61,60]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
@@ -2079,7 +2079,7 @@ define <16 x i32> @test_bitreverse_v16i3
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v16i32:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -2092,7 +2092,7 @@ define <16 x i32> @test_bitreverse_v16i3
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v16i32:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -2109,7 +2109,7 @@ define <16 x i32> @test_bitreverse_v16i3
define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
@@ -2253,7 +2253,7 @@ define <8 x i64> @test_bitreverse_v8i64(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v8i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
@@ -2303,7 +2303,7 @@ define <8 x i64> @test_bitreverse_v8i64(
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
@@ -2343,7 +2343,7 @@ define <8 x i64> @test_bitreverse_v8i64(
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -2365,7 +2365,7 @@ define <8 x i64> @test_bitreverse_v8i64(
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v8i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlq $56, %zmm0, %zmm1
; AVX512F-NEXT: vpsrlq $40, %zmm0, %zmm2
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
@@ -2405,7 +2405,7 @@ define <8 x i64> @test_bitreverse_v8i64(
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v8i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24,39,38,37,36,35,34,33,32,47,46,45,44,43,42,41,40,55,54,53,52,51,50,49,48,63,62,61,60,59,58,57,56]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
@@ -2419,7 +2419,7 @@ define <8 x i64> @test_bitreverse_v8i64(
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v8i64:
-; XOPAVX1: # BB#0:
+; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -2432,7 +2432,7 @@ define <8 x i64> @test_bitreverse_v8i64(
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v8i64:
-; XOPAVX2: # BB#0:
+; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
@@ -2453,7 +2453,7 @@ define <8 x i64> @test_bitreverse_v8i64(
define i32 @fold_bitreverse_i32() nounwind {
; ALL-LABEL: fold_bitreverse_i32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movl $16711935, %eax # imm = 0xFF00FF
; ALL-NEXT: retq
%b = call i32 @llvm.bitreverse.i32(i32 4278255360)
@@ -2462,17 +2462,17 @@ define i32 @fold_bitreverse_i32() nounwi
define <16 x i8> @fold_bitreverse_v16i8() nounwind {
; SSE-LABEL: fold_bitreverse_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
; SSE-NEXT: retq
;
; AVX-LABEL: fold_bitreverse_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
; AVX-NEXT: retq
;
; XOP-LABEL: fold_bitreverse_v16i8:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
; XOP-NEXT: retq
%b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> <i8 0, i8 -1, i8 2, i8 -3, i8 4, i8 -5, i8 6, i8 -7, i8 8, i8 -9, i8 10, i8 -11, i8 12, i8 -13, i8 14, i8 -15>)
@@ -2481,18 +2481,18 @@ define <16 x i8> @fold_bitreverse_v16i8(
define <16 x i16> @fold_bitreverse_v16i16() nounwind {
; SSE-LABEL: fold_bitreverse_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,65535,16384,49151,8192,57343,24576,40959]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [4096,61439,20480,45055,12288,53247,28672,36863]
; SSE-NEXT: retq
;
; AVX-LABEL: fold_bitreverse_v16i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
; AVX-NEXT: retq
;
; XOP-LABEL: fold_bitreverse_v16i16:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
; XOP-NEXT: retq
%b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> <i16 0, i16 -1, i16 2, i16 -3, i16 4, i16 -5, i16 6, i16 -7, i16 8, i16 -9, i16 10, i16 -11, i16 12, i16 -13, i16 14, i16 -15>)
@@ -2501,7 +2501,7 @@ define <16 x i16> @fold_bitreverse_v16i1
define <16 x i32> @fold_bitreverse_v16i32() nounwind {
; SSE-LABEL: fold_bitreverse_v16i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,4294967295,1073741824,3221225471]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [536870912,3758096383,1610612736,2684354559]
; SSE-NEXT: movaps {{.*#+}} xmm2 = [268435456,4026531839,1342177280,2952790015]
@@ -2509,24 +2509,24 @@ define <16 x i32> @fold_bitreverse_v16i3
; SSE-NEXT: retq
;
; AVX1-LABEL: fold_bitreverse_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; AVX1-NEXT: retq
;
; AVX2-LABEL: fold_bitreverse_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
; AVX2-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; AVX2-NEXT: retq
;
; AVX512-LABEL: fold_bitreverse_v16i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps {{.*#+}} zmm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559,268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; AVX512-NEXT: retq
;
; XOP-LABEL: fold_bitreverse_v16i32:
-; XOP: # BB#0:
+; XOP: # %bb.0:
; XOP-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
; XOP-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; XOP-NEXT: retq
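
A scalar C sketch of the nibble lookup-table scheme behind the SSSE3 and
wider bitreverse CHECK lines above (an illustration, not from this
commit). The vector constant [0,128,64,192,32,160,96,224,16,144,80,208,
48,176,112,240] that recurs throughout the diff is the table below:
entry i holds the 4-bit reversal of i placed in the high nibble. Each
byte indexes it twice -- once with its low nibble, once with its high
nibble shifted down -- and the two results are OR'd; the vector code
does the same with pand/psrlw/pshufb/por, using a second pre-shifted
table in place of the final shift:

    #include <stdint.h>

    static const uint8_t rev_hi[16] = {
         0, 128,  64, 192, 32, 160,  96, 224,
        16, 144,  80, 208, 48, 176, 112, 240};

    uint8_t bitreverse8(uint8_t b) {
        return rev_hi[b & 0x0F] | (rev_hi[b >> 4] >> 4);
    }

For the wider element types a byte reversal runs first, which is why the
v8i16/v4i32/v2i64 variants open with pshufb masks like [1,0,3,2,...],
[3,2,1,0,...], and [7,6,5,4,...].
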
Modified: llvm/trunk/test/CodeGen/X86/vector-blend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-blend.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-blend.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-blend.ll Mon Dec 4 09:18:51 2017
@@ -9,24 +9,24 @@
define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
; SSE2-LABEL: vsel_float:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_float:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_float:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_float:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
entry:
@@ -36,24 +36,24 @@ entry:
define <4 x float> @vsel_float2(<4 x float> %v1, <4 x float> %v2) {
; SSE2-LABEL: vsel_float2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_float2:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_float2:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_float2:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
entry:
@@ -63,29 +63,29 @@ entry:
define <4 x i8> @vsel_4xi8(<4 x i8> %v1, <4 x i8> %v2) {
; SSE2-LABEL: vsel_4xi8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_4xi8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_4xi8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_4xi8:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_4xi8:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX2-NEXT: retq
entry:
@@ -95,31 +95,31 @@ entry:
define <4 x i16> @vsel_4xi16(<4 x i16> %v1, <4 x i16> %v2) {
; SSE2-LABEL: vsel_4xi16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_4xi16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_4xi16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_4xi16:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_4xi16:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-NEXT: retq
entry:
@@ -129,31 +129,31 @@ entry:
define <4 x i32> @vsel_i32(<4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: vsel_i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: retq
entry:
@@ -163,24 +163,24 @@ entry:
define <2 x double> @vsel_double(<2 x double> %v1, <2 x double> %v2) {
; SSE2-LABEL: vsel_double:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_double:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_double:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_double:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: retq
entry:
@@ -190,29 +190,29 @@ entry:
define <2 x i64> @vsel_i64(<2 x i64> %v1, <2 x i64> %v2) {
; SSE2-LABEL: vsel_i64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_i64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_i64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: retq
entry:
@@ -222,7 +222,7 @@ entry:
define <8 x i16> @vsel_8xi16(<8 x i16> %v1, <8 x i16> %v2) {
; SSE2-LABEL: vsel_8xi16:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [0,65535,65535,65535,0,65535,65535,65535]
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: andnps %xmm0, %xmm2
@@ -231,7 +231,7 @@ define <8 x i16> @vsel_8xi16(<8 x i16> %
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_8xi16:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movaps {{.*#+}} xmm2 = [0,65535,65535,65535,0,65535,65535,65535]
; SSSE3-NEXT: andps %xmm2, %xmm1
; SSSE3-NEXT: andnps %xmm0, %xmm2
@@ -240,12 +240,12 @@ define <8 x i16> @vsel_8xi16(<8 x i16> %
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_8xi16:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_8xi16:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-NEXT: retq
entry:
@@ -255,7 +255,7 @@ entry:
define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
; SSE2-LABEL: vsel_i8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps {{.*#+}} xmm2 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255]
; SSE2-NEXT: andps %xmm2, %xmm1
; SSE2-NEXT: andnps %xmm0, %xmm2
@@ -264,14 +264,14 @@ define <16 x i8> @vsel_i8(<16 x i8> %v1,
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[12],zero,zero,zero
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[1,2,3],zero,xmm1[5,6,7],zero,xmm1[9,10,11],zero,xmm1[13,14,15]
; SSSE3-NEXT: por %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
@@ -279,7 +279,7 @@ define <16 x i8> @vsel_i8(<16 x i8> %v1,
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_i8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -293,7 +293,7 @@ entry:
define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
; SSE2-LABEL: vsel_float8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
@@ -301,7 +301,7 @@ define <8 x float> @vsel_float8(<8 x flo
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_float8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSSE3-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; SSSE3-NEXT: movaps %xmm2, %xmm0
@@ -309,13 +309,13 @@ define <8 x float> @vsel_float8(<8 x flo
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_float8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_float8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX-NEXT: retq
entry:
@@ -325,7 +325,7 @@ entry:
define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) {
; SSE2-LABEL: vsel_i328:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
@@ -333,7 +333,7 @@ define <8 x i32> @vsel_i328(<8 x i32> %v
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i328:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSSE3-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
; SSSE3-NEXT: movaps %xmm2, %xmm0
@@ -341,13 +341,13 @@ define <8 x i32> @vsel_i328(<8 x i32> %v
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i328:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_i328:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX-NEXT: retq
entry:
@@ -357,7 +357,7 @@ entry:
define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) {
; SSE2-LABEL: vsel_double8:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
; SSE2-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
; SSE2-NEXT: movapd %xmm4, %xmm0
@@ -367,7 +367,7 @@ define <8 x double> @vsel_double8(<8 x d
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_double8:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
; SSSE3-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
; SSSE3-NEXT: movapd %xmm4, %xmm0
@@ -377,7 +377,7 @@ define <8 x double> @vsel_double8(<8 x d
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_double8:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm4[1]
; SSE41-NEXT: blendpd {{.*#+}} xmm2 = xmm2[0],xmm6[1]
; SSE41-NEXT: movaps %xmm5, %xmm1
@@ -385,7 +385,7 @@ define <8 x double> @vsel_double8(<8 x d
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_double8:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3]
; AVX-NEXT: retq
@@ -396,7 +396,7 @@ entry:
define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
; SSE2-LABEL: vsel_i648:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
; SSE2-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
; SSE2-NEXT: movapd %xmm4, %xmm0
@@ -406,7 +406,7 @@ define <8 x i64> @vsel_i648(<8 x i64> %v
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i648:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
; SSSE3-NEXT: movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
; SSSE3-NEXT: movapd %xmm4, %xmm0
@@ -416,7 +416,7 @@ define <8 x i64> @vsel_i648(<8 x i64> %v
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i648:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
; SSE41-NEXT: movaps %xmm5, %xmm1
@@ -424,13 +424,13 @@ define <8 x i64> @vsel_i648(<8 x i64> %v
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_i648:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_i648:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4,5,6,7]
; AVX2-NEXT: retq
@@ -441,7 +441,7 @@ entry:
define <4 x double> @vsel_double4(<4 x double> %v1, <4 x double> %v2) {
; SSE2-LABEL: vsel_double4:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
; SSE2-NEXT: movapd %xmm2, %xmm0
@@ -449,7 +449,7 @@ define <4 x double> @vsel_double4(<4 x d
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_double4:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSSE3-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
; SSSE3-NEXT: movapd %xmm2, %xmm0
@@ -457,13 +457,13 @@ define <4 x double> @vsel_double4(<4 x d
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_double4:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm2[1]
; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm1[0],xmm3[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_double4:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX-NEXT: retq
entry:
@@ -473,7 +473,7 @@ entry:
define <2 x double> @testa(<2 x double> %x, <2 x double> %y) {
; SSE2-LABEL: testa:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: cmplepd %xmm0, %xmm2
; SSE2-NEXT: andpd %xmm2, %xmm0
@@ -482,7 +482,7 @@ define <2 x double> @testa(<2 x double>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: testa:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movapd %xmm1, %xmm2
; SSSE3-NEXT: cmplepd %xmm0, %xmm2
; SSSE3-NEXT: andpd %xmm2, %xmm0
@@ -491,7 +491,7 @@ define <2 x double> @testa(<2 x double>
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testa:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movapd %xmm0, %xmm2
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmplepd %xmm2, %xmm0
@@ -500,7 +500,7 @@ define <2 x double> @testa(<2 x double>
; SSE41-NEXT: retq
;
; AVX-LABEL: testa:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -512,7 +512,7 @@ entry:
define <2 x double> @testb(<2 x double> %x, <2 x double> %y) {
; SSE2-LABEL: testb:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: cmpnlepd %xmm0, %xmm2
; SSE2-NEXT: andpd %xmm2, %xmm0
@@ -521,7 +521,7 @@ define <2 x double> @testb(<2 x double>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: testb:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movapd %xmm1, %xmm2
; SSSE3-NEXT: cmpnlepd %xmm0, %xmm2
; SSSE3-NEXT: andpd %xmm2, %xmm0
@@ -530,7 +530,7 @@ define <2 x double> @testb(<2 x double>
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testb:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movapd %xmm0, %xmm2
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmpnlepd %xmm2, %xmm0
@@ -539,7 +539,7 @@ define <2 x double> @testb(<2 x double>
; SSE41-NEXT: retq
;
; AVX-LABEL: testb:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vcmpnlepd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -553,27 +553,27 @@ entry:
; blend instruction with an immediate mask
define <4 x double> @constant_blendvpd_avx(<4 x double> %xy, <4 x double> %ab) {
; SSE2-LABEL: constant_blendvpd_avx:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: movapd %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: constant_blendvpd_avx:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
; SSSE3-NEXT: movaps %xmm2, %xmm0
; SSSE3-NEXT: movapd %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: constant_blendvpd_avx:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm1[0],xmm3[1]
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_blendvpd_avx:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3]
; AVX-NEXT: retq
entry:
@@ -583,7 +583,7 @@ entry:
define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd) {
; SSE2-LABEL: constant_blendvps_avx:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm2[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm3[2,0]
@@ -593,7 +593,7 @@ define <8 x float> @constant_blendvps_av
; SSE2-NEXT: retq
;
; SSSE3-LABEL: constant_blendvps_avx:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm2[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm3[2,0]
@@ -603,13 +603,13 @@ define <8 x float> @constant_blendvps_av
; SSSE3-NEXT: retq
;
; SSE41-LABEL: constant_blendvps_avx:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_blendvps_avx:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
; AVX-NEXT: retq
entry:
@@ -619,7 +619,7 @@ entry:
define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
; SSE2-LABEL: constant_pblendvb_avx2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movaps {{.*#+}} xmm4 = [255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255]
; SSE2-NEXT: movaps %xmm4, %xmm5
; SSE2-NEXT: andnps %xmm0, %xmm5
@@ -633,7 +633,7 @@ define <32 x i8> @constant_pblendvb_avx2
; SSE2-NEXT: retq
;
; SSSE3-LABEL: constant_pblendvb_avx2:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [128,128,2,128,4,5,6,128,128,128,10,128,12,13,14,128]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [0,1,128,3,128,128,128,7,8,9,128,11,128,128,128,15]
@@ -645,7 +645,7 @@ define <32 x i8> @constant_pblendvb_avx2
; SSSE3-NEXT: retq
;
; SSE41-LABEL: constant_pblendvb_avx2:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
@@ -655,7 +655,7 @@ define <32 x i8> @constant_pblendvb_avx2
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_pblendvb_avx2:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255]
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
@@ -663,7 +663,7 @@ define <32 x i8> @constant_pblendvb_avx2
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_pblendvb_avx2:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
@@ -678,24 +678,24 @@ declare <4 x double> @llvm.x86.avx.blend
;; 4 tests for shufflevectors that optimize to blend + immediate
define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: blend_shufflevector_4xfloat:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_4xfloat:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_4xfloat:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_shufflevector_4xfloat:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
entry:
@@ -705,7 +705,7 @@ entry:
define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b) {
; SSE2-LABEL: blend_shufflevector_8xfloat:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2]
@@ -714,7 +714,7 @@ define <8 x float> @blend_shufflevector_
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_8xfloat:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2]
@@ -723,13 +723,13 @@ define <8 x float> @blend_shufflevector_
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_8xfloat:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_shufflevector_8xfloat:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5],ymm0[6],ymm1[7]
; AVX-NEXT: retq
entry:
@@ -739,24 +739,24 @@ entry:
define <4 x double> @blend_shufflevector_4xdouble(<4 x double> %a, <4 x double> %b) {
; SSE2-LABEL: blend_shufflevector_4xdouble:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSE2-NEXT: movapd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_4xdouble:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
; SSSE3-NEXT: movapd %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_4xdouble:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm2[1]
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_shufflevector_4xdouble:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
; AVX-NEXT: retq
entry:
@@ -766,30 +766,30 @@ entry:
define <4 x i64> @blend_shufflevector_4xi64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: blend_shufflevector_4xi64:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_4xi64:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSSE3-NEXT: movaps %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_4xi64:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: movaps %xmm3, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: blend_shufflevector_4xi64:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: blend_shufflevector_4xi64:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
; AVX2-NEXT: retq
entry:
@@ -799,7 +799,7 @@ entry:
define <4 x i32> @blend_logic_v4i32(<4 x i32> %b, <4 x i32> %a, <4 x i32> %c) {
; SSE2-LABEL: blend_logic_v4i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: pandn %xmm2, %xmm0
@@ -807,7 +807,7 @@ define <4 x i32> @blend_logic_v4i32(<4 x
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_logic_v4i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: pand %xmm0, %xmm1
; SSSE3-NEXT: pandn %xmm2, %xmm0
@@ -815,14 +815,14 @@ define <4 x i32> @blend_logic_v4i32(<4 x
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_logic_v4i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_logic_v4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpblendvb %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -838,7 +838,7 @@ entry:
define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; SSE2-LABEL: blend_logic_v8i32:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
@@ -850,7 +850,7 @@ define <8 x i32> @blend_logic_v8i32(<8 x
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_logic_v8i32:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: pand %xmm1, %xmm3
@@ -862,7 +862,7 @@ define <8 x i32> @blend_logic_v8i32(<8 x
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_logic_v8i32:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm4
@@ -873,7 +873,7 @@ define <8 x i32> @blend_logic_v8i32(<8 x
; SSE41-NEXT: retq
;
; AVX1-LABEL: blend_logic_v8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
@@ -884,7 +884,7 @@ define <8 x i32> @blend_logic_v8i32(<8 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: blend_logic_v8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -900,14 +900,14 @@ entry:
define <4 x i32> @blend_neg_logic_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: blend_neg_logic_v4i32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_neg_logic_v4i32:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpsrad $31, %xmm1, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
@@ -924,7 +924,7 @@ entry:
define <8 x i32> @blend_neg_logic_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: blend_neg_logic_v8i32:
-; SSE: # BB#0: # %entry
+; SSE: # %bb.0: # %entry
; SSE-NEXT: psrad $31, %xmm3
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: pxor %xmm2, %xmm0
@@ -934,7 +934,7 @@ define <8 x i32> @blend_neg_logic_v8i32(
; SSE-NEXT: retq
;
; AVX1-LABEL: blend_neg_logic_v8i32:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
@@ -950,7 +950,7 @@ define <8 x i32> @blend_neg_logic_v8i32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: blend_neg_logic_v8i32:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
@@ -967,7 +967,7 @@ entry:
define <4 x i32> @blend_neg_logic_v4i32_2(<4 x i32> %v, <4 x i32> %c) {
; SSE2-LABEL: blend_neg_logic_v4i32_2:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: psubd %xmm0, %xmm1
@@ -975,7 +975,7 @@ define <4 x i32> @blend_neg_logic_v4i32_
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_neg_logic_v4i32_2:
-; SSSE3: # BB#0: # %entry
+; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: pxor %xmm1, %xmm0
; SSSE3-NEXT: psubd %xmm0, %xmm1
@@ -983,7 +983,7 @@ define <4 x i32> @blend_neg_logic_v4i32_
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_neg_logic_v4i32_2:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: psubd %xmm2, %xmm3
@@ -993,7 +993,7 @@ define <4 x i32> @blend_neg_logic_v4i32_
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_neg_logic_v4i32_2:
-; AVX: # BB#0: # %entry
+; AVX: # %bb.0: # %entry
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm2
; AVX-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vector-compare-all_of.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-compare-all_of.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-compare-all_of.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-compare-all_of.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_v2f64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE-NEXT: pand %xmm1, %xmm0
@@ -14,7 +14,7 @@ define i64 @test_v2f64_sext(<2 x double>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
@@ -22,7 +22,7 @@ define i64 @test_v2f64_sext(<2 x double>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -40,7 +40,7 @@ define i64 @test_v2f64_sext(<2 x double>
define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: andpd %xmm3, %xmm2
@@ -50,7 +50,7 @@ define i64 @test_v4f64_sext(<4 x double>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vmovmskpd %ymm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -61,7 +61,7 @@ define i64 @test_v4f64_sext(<4 x double>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -84,7 +84,7 @@ define i64 @test_v4f64_sext(<4 x double>
define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -97,7 +97,7 @@ define i64 @test_v4f64_legal_sext(<4 x d
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_legal_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -111,7 +111,7 @@ define i64 @test_v4f64_legal_sext(<4 x d
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -136,7 +136,7 @@ define i64 @test_v4f64_legal_sext(<4 x d
define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_v4f32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm0, %xmm1
; SSE-NEXT: movmskps %xmm1, %eax
; SSE-NEXT: xorl %ecx, %ecx
@@ -146,7 +146,7 @@ define i32 @test_v4f32_sext(<4 x float>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -156,7 +156,7 @@ define i32 @test_v4f32_sext(<4 x float>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -178,7 +178,7 @@ define i32 @test_v4f32_sext(<4 x float>
define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: test_v8f32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: andps %xmm3, %xmm2
@@ -190,7 +190,7 @@ define i32 @test_v8f32_sext(<8 x float>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vmovmskps %ymm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -201,7 +201,7 @@ define i32 @test_v8f32_sext(<8 x float>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -228,7 +228,7 @@ define i32 @test_v8f32_sext(<8 x float>
define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: test_v8f32_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -240,7 +240,7 @@ define i32 @test_v8f32_legal_sext(<8 x f
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32_legal_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -253,7 +253,7 @@ define i32 @test_v8f32_legal_sext(<8 x f
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f32_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -281,7 +281,7 @@ define i32 @test_v8f32_legal_sext(<8 x f
define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_v2i64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: pand %xmm0, %xmm1
@@ -289,7 +289,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2i64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -297,7 +297,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2i64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -315,7 +315,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a
define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-LABEL: test_v4i64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm3, %xmm1
; SSE-NEXT: pcmpgtq %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
@@ -325,7 +325,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v4i64_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -340,7 +340,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: xorl %ecx, %ecx
@@ -351,7 +351,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -374,7 +374,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a
define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-LABEL: test_v4i64_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm3, %xmm1
; SSE-NEXT: pcmpgtq %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
@@ -387,7 +387,7 @@ define i64 @test_v4i64_legal_sext(<4 x i
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v4i64_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -403,7 +403,7 @@ define i64 @test_v4i64_legal_sext(<4 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -417,7 +417,7 @@ define i64 @test_v4i64_legal_sext(<4 x i
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -442,7 +442,7 @@ define i64 @test_v4i64_legal_sext(<4 x i
define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_v4i32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: xorl %ecx, %ecx
@@ -452,7 +452,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4i32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -462,7 +462,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4i32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -484,7 +484,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a
define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: test_v8i32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
@@ -496,7 +496,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v8i32_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -511,7 +511,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: xorl %ecx, %ecx
@@ -522,7 +522,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -549,7 +549,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a
define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: test_v8i32_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
@@ -561,7 +561,7 @@ define i32 @test_v8i32_legal_sext(<8 x i
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v8i32_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -576,7 +576,7 @@ define i32 @test_v8i32_legal_sext(<8 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -589,7 +589,7 @@ define i32 @test_v8i32_legal_sext(<8 x i
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -617,7 +617,7 @@ define i32 @test_v8i32_legal_sext(<8 x i
define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_v8i16_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: xorl %ecx, %ecx
@@ -628,7 +628,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -639,7 +639,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -665,7 +665,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a
define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-LABEL: test_v16i16_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
@@ -678,7 +678,7 @@ define i16 @test_v16i16_sext(<16 x i16>
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -697,7 +697,7 @@ define i16 @test_v16i16_sext(<16 x i16>
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: xorl %eax, %eax
@@ -708,7 +708,7 @@ define i16 @test_v16i16_sext(<16 x i16>
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -739,7 +739,7 @@ define i16 @test_v16i16_sext(<16 x i16>
define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-LABEL: test_v16i16_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
@@ -752,7 +752,7 @@ define i16 @test_v16i16_legal_sext(<16 x
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -768,7 +768,7 @@ define i16 @test_v16i16_legal_sext(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -782,7 +782,7 @@ define i16 @test_v16i16_legal_sext(<16 x
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -815,7 +815,7 @@ define i16 @test_v16i16_legal_sext(<16 x
define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_v16i8_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: xorl %ecx, %ecx
@@ -826,7 +826,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: xorl %ecx, %ecx
@@ -837,7 +837,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -867,7 +867,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0
define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; SSE-LABEL: test_v32i8_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm3, %xmm1
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
@@ -880,7 +880,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
@@ -901,7 +901,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i8_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: xorl %eax, %eax
@@ -912,7 +912,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i8_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
Modified: llvm/trunk/test/CodeGen/X86/vector-compare-any_of.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-compare-any_of.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-compare-any_of.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-compare-any_of.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_v2f64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE-NEXT: por %xmm1, %xmm0
@@ -14,7 +14,7 @@ define i64 @test_v2f64_sext(<2 x double>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
@@ -22,7 +22,7 @@ define i64 @test_v2f64_sext(<2 x double>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -40,7 +40,7 @@ define i64 @test_v2f64_sext(<2 x double>
define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: orpd %xmm3, %xmm2
@@ -50,7 +50,7 @@ define i64 @test_v4f64_sext(<4 x double>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vmovmskpd %ymm0, %eax
; AVX-NEXT: negl %eax
@@ -59,7 +59,7 @@ define i64 @test_v4f64_sext(<4 x double>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -82,7 +82,7 @@ define i64 @test_v4f64_sext(<4 x double>
define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -93,7 +93,7 @@ define i64 @test_v4f64_legal_sext(<4 x d
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_legal_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -105,7 +105,7 @@ define i64 @test_v4f64_legal_sext(<4 x d
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -130,7 +130,7 @@ define i64 @test_v4f64_legal_sext(<4 x d
define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_v4f32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm0, %xmm1
; SSE-NEXT: movmskps %xmm1, %eax
; SSE-NEXT: negl %eax
@@ -138,7 +138,7 @@ define i32 @test_v4f32_sext(<4 x float>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: negl %eax
@@ -146,7 +146,7 @@ define i32 @test_v4f32_sext(<4 x float>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -168,7 +168,7 @@ define i32 @test_v4f32_sext(<4 x float>
define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: test_v8f32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: orps %xmm3, %xmm2
@@ -178,7 +178,7 @@ define i32 @test_v8f32_sext(<8 x float>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vmovmskps %ymm0, %eax
; AVX-NEXT: negl %eax
@@ -187,7 +187,7 @@ define i32 @test_v8f32_sext(<8 x float>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -214,7 +214,7 @@ define i32 @test_v8f32_sext(<8 x float>
define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: test_v8f32_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -224,7 +224,7 @@ define i32 @test_v8f32_legal_sext(<8 x f
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32_legal_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -235,7 +235,7 @@ define i32 @test_v8f32_legal_sext(<8 x f
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f32_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -263,7 +263,7 @@ define i32 @test_v8f32_legal_sext(<8 x f
define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_v2i64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT: por %xmm0, %xmm1
@@ -271,7 +271,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2i64_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
@@ -279,7 +279,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2i64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
@@ -297,7 +297,7 @@ define i64 @test_v2i64_sext(<2 x i64> %a
define i64 @test_v4i64_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-LABEL: test_v4i64_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm3, %xmm1
; SSE-NEXT: pcmpgtq %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
@@ -307,7 +307,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v4i64_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -320,7 +320,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
; AVX2-NEXT: negl %eax
@@ -329,7 +329,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
@@ -352,7 +352,7 @@ define i64 @test_v4i64_sext(<4 x i64> %a
define i64 @test_v4i64_legal_sext(<4 x i64> %a0, <4 x i64> %a1) {
; SSE-LABEL: test_v4i64_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm3, %xmm1
; SSE-NEXT: pcmpgtq %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
@@ -363,7 +363,7 @@ define i64 @test_v4i64_legal_sext(<4 x i
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v4i64_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -377,7 +377,7 @@ define i64 @test_v4i64_legal_sext(<4 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -389,7 +389,7 @@ define i64 @test_v4i64_legal_sext(<4 x i
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -414,7 +414,7 @@ define i64 @test_v4i64_legal_sext(<4 x i
define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_v4i32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: movmskps %xmm0, %eax
; SSE-NEXT: negl %eax
@@ -422,7 +422,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4i32_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovmskps %xmm0, %eax
; AVX-NEXT: negl %eax
@@ -430,7 +430,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4i32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
@@ -452,7 +452,7 @@ define i32 @test_v4i32_sext(<4 x i32> %a
define i32 @test_v8i32_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: test_v8i32_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
@@ -462,7 +462,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v8i32_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -475,7 +475,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: negl %eax
@@ -484,7 +484,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
@@ -511,7 +511,7 @@ define i32 @test_v8i32_sext(<8 x i32> %a
define i32 @test_v8i32_legal_sext(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: test_v8i32_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
@@ -521,7 +521,7 @@ define i32 @test_v8i32_legal_sext(<8 x i
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v8i32_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -534,7 +534,7 @@ define i32 @test_v8i32_legal_sext(<8 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -545,7 +545,7 @@ define i32 @test_v8i32_legal_sext(<8 x i
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -573,7 +573,7 @@ define i32 @test_v8i32_legal_sext(<8 x i
define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_v8i16_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
@@ -582,7 +582,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
@@ -591,7 +591,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -617,7 +617,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a
define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-LABEL: test_v16i16_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
@@ -628,7 +628,7 @@ define i16 @test_v16i16_sext(<16 x i16>
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -647,7 +647,7 @@ define i16 @test_v16i16_sext(<16 x i16>
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
@@ -657,7 +657,7 @@ define i16 @test_v16i16_sext(<16 x i16>
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -688,7 +688,7 @@ define i16 @test_v16i16_sext(<16 x i16>
define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-LABEL: test_v16i16_legal_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
@@ -699,7 +699,7 @@ define i16 @test_v16i16_legal_sext(<16 x
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -713,7 +713,7 @@ define i16 @test_v16i16_legal_sext(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16_legal_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -725,7 +725,7 @@ define i16 @test_v16i16_legal_sext(<16 x
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16_legal_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -758,7 +758,7 @@ define i16 @test_v16i16_legal_sext(<16 x
define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_v16i8_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
@@ -767,7 +767,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
@@ -776,7 +776,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
@@ -806,7 +806,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0
define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; SSE-LABEL: test_v32i8_sext:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm3, %xmm1
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
@@ -817,7 +817,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
@@ -838,7 +838,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i8_sext:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
@@ -848,7 +848,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i8_sext:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
; AVX512-NEXT: vpmovm2b %k0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
Modified: llvm/trunk/test/CodeGen/X86/vector-compare-combines.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-compare-combines.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-compare-combines.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-compare-combines.ll Mon Dec 4 09:18:51 2017
@@ -9,12 +9,12 @@ declare <4 x i32> @llvm.x86.sse41.pmaxsd
define <4 x i32> @PR27924_cmpeq(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: PR27924_cmpeq:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: PR27924_cmpeq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%cmp = icmp sgt <4 x i32> %a, %b
@@ -27,12 +27,12 @@ define <4 x i32> @PR27924_cmpeq(<4 x i32
define <4 x i32> @PR27924_cmpgt(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: PR27924_cmpgt:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: PR27924_cmpgt:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%cmp = icmp sgt <4 x i32> %a, %b
Modified: llvm/trunk/test/CodeGen/X86/vector-compare-results.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-compare-results.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-compare-results.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-compare-results.ll Mon Dec 4 09:18:51 2017
@@ -13,13 +13,13 @@
define <2 x i1> @test_cmp_v2f64(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v2f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v2f64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fcmp ogt <2 x double> %a0, %a1
@@ -28,13 +28,13 @@ define <2 x i1> @test_cmp_v2f64(<2 x dou
define <4 x i1> @test_cmp_v4f32(<4 x float> %a0, <4 x float> %a1) nounwind {
; SSE-LABEL: test_cmp_v4f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v4f32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = fcmp ogt <4 x float> %a0, %a1
@@ -43,7 +43,7 @@ define <4 x i1> @test_cmp_v4f32(<4 x flo
define <2 x i1> @test_cmp_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -58,12 +58,12 @@ define <2 x i1> @test_cmp_v2i64(<2 x i64
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: test_cmp_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <2 x i64> %a0, %a1
@@ -72,12 +72,12 @@ define <2 x i1> @test_cmp_v2i64(<2 x i64
define <4 x i1> @test_cmp_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: test_cmp_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <4 x i32> %a0, %a1
@@ -86,12 +86,12 @@ define <4 x i1> @test_cmp_v4i32(<4 x i32
define <8 x i1> @test_cmp_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; SSE-LABEL: test_cmp_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <8 x i16> %a0, %a1
@@ -100,12 +100,12 @@ define <8 x i1> @test_cmp_v8i16(<8 x i16
define <16 x i1> @test_cmp_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
; SSE-LABEL: test_cmp_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_cmp_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = icmp sgt <16 x i8> %a0, %a1
@@ -118,7 +118,7 @@ define <16 x i1> @test_cmp_v16i8(<16 x i
define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v4f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm1, %xmm3
; SSE-NEXT: cmpltpd %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -126,7 +126,7 @@ define <4 x i1> @test_cmp_v4f64(<4 x dou
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -134,7 +134,7 @@ define <4 x i1> @test_cmp_v4f64(<4 x dou
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -142,7 +142,7 @@ define <4 x i1> @test_cmp_v4f64(<4 x dou
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v4f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -154,7 +154,7 @@ define <4 x i1> @test_cmp_v4f64(<4 x dou
define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
; SSE-LABEL: test_cmp_v8f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm1, %xmm3
; SSE-NEXT: cmpltps %xmm0, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -162,7 +162,7 @@ define <8 x i1> @test_cmp_v8f32(<8 x flo
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -170,7 +170,7 @@ define <8 x i1> @test_cmp_v8f32(<8 x flo
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -178,7 +178,7 @@ define <8 x i1> @test_cmp_v8f32(<8 x flo
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v8f32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -190,7 +190,7 @@ define <8 x i1> @test_cmp_v8f32(<8 x flo
define <4 x i1> @test_cmp_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm4, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm1
@@ -216,14 +216,14 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm3, %xmm1
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
; SSE42-NEXT: packssdw %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -233,7 +233,7 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -241,7 +241,7 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v4i64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -253,14 +253,14 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64
define <8 x i1> @test_cmp_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: test_cmp_v8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -270,7 +270,7 @@ define <8 x i1> @test_cmp_v8i32(<8 x i32
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
@@ -278,7 +278,7 @@ define <8 x i1> @test_cmp_v8i32(<8 x i32
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v8i32:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -290,14 +290,14 @@ define <8 x i1> @test_cmp_v8i32(<8 x i32
define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: test_cmp_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm3, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
@@ -307,7 +307,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
@@ -315,7 +315,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -323,7 +323,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -331,7 +331,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -343,7 +343,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x
define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpgtb %xmm2, %xmm0
; SSE2-NEXT: pcmpgtb %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
@@ -448,7 +448,7 @@ define <32 x i1> @test_cmp_v32i8(<32 x i
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtb %xmm2, %xmm0
; SSE42-NEXT: pcmpgtb %xmm3, %xmm1
; SSE42-NEXT: pextrb $15, %xmm1, %eax
@@ -551,7 +551,7 @@ define <32 x i1> @test_cmp_v32i8(<32 x i
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
@@ -560,12 +560,12 @@ define <32 x i1> @test_cmp_v32i8(<32 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = icmp sgt <32 x i8> %a0, %a1
@@ -578,7 +578,7 @@ define <32 x i1> @test_cmp_v32i8(<32 x i
define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v8f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltpd %xmm3, %xmm7
; SSE-NEXT: cmpltpd %xmm2, %xmm6
; SSE-NEXT: packssdw %xmm7, %xmm6
@@ -590,7 +590,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x dou
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
@@ -602,7 +602,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x dou
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
@@ -613,7 +613,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x dou
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v8f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
@@ -621,7 +621,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x dou
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v8f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
@@ -629,7 +629,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x dou
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v8f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -641,7 +641,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x dou
define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; SSE-LABEL: test_cmp_v16f32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: cmpltps %xmm3, %xmm7
; SSE-NEXT: cmpltps %xmm2, %xmm6
; SSE-NEXT: packssdw %xmm7, %xmm6
@@ -653,7 +653,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
@@ -665,7 +665,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -676,7 +676,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -684,7 +684,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -692,7 +692,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -704,7 +704,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x
define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v8i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm3
@@ -752,7 +752,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v8i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm7, %xmm3
; SSE42-NEXT: pcmpgtq %xmm6, %xmm2
; SSE42-NEXT: packssdw %xmm3, %xmm2
@@ -763,7 +763,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -779,7 +779,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
@@ -790,7 +790,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v8i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
@@ -798,7 +798,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v8i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
@@ -806,7 +806,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v8i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -818,7 +818,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64
define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: test_cmp_v16i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm7, %xmm3
; SSE-NEXT: pcmpgtd %xmm6, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
@@ -829,7 +829,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
@@ -845,7 +845,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -856,7 +856,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -864,7 +864,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -872,7 +872,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -884,7 +884,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x
define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpgtw %xmm5, %xmm1
; SSE2-NEXT: pcmpgtw %xmm4, %xmm0
; SSE2-NEXT: packsswb %xmm1, %xmm0
@@ -993,7 +993,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtw %xmm4, %xmm0
; SSE42-NEXT: pcmpgtw %xmm5, %xmm1
; SSE42-NEXT: pcmpgtw %xmm6, %xmm2
@@ -1098,7 +1098,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
@@ -1113,7 +1113,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
@@ -1121,7 +1121,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
@@ -1132,7 +1132,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
@@ -1143,7 +1143,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -1154,7 +1154,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x
define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; SSE2-LABEL: test_cmp_v64i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpgtb %xmm4, %xmm0
; SSE2-NEXT: pcmpgtb %xmm5, %xmm1
; SSE2-NEXT: pcmpgtb %xmm6, %xmm2
@@ -1359,7 +1359,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v64i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtb %xmm4, %xmm0
; SSE42-NEXT: pcmpgtb %xmm5, %xmm1
; SSE42-NEXT: pcmpgtb %xmm6, %xmm2
@@ -1560,7 +1560,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v64i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -1766,7 +1766,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v64i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
@@ -1968,7 +1968,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1979,7 +1979,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
; AVX512DQ-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1990,7 +1990,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: retq
@@ -2004,7 +2004,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i
define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind {
; SSE-LABEL: test_cmp_v16f64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm8
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm11
@@ -2032,7 +2032,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltpd %ymm3, %ymm7, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
; AVX1-NEXT: vpackssdw %xmm7, %xmm3, %xmm3
@@ -2052,7 +2052,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltpd %ymm3, %ymm7, %ymm3
; AVX2-NEXT: vcmpltpd %ymm2, %ymm6, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
@@ -2069,7 +2069,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm2, %k0
; AVX512F-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512F-NEXT: kunpckbw %k0, %k1, %k1
@@ -2079,7 +2079,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm2, %k0
; AVX512DQ-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512DQ-NEXT: kunpckbw %k0, %k1, %k0
@@ -2089,7 +2089,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm2, %k0
; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
@@ -2103,7 +2103,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x
define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32f32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9
; SSE2-NEXT: movaps {{[0-9]+}}(%rsp), %xmm11
; SSE2-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10
@@ -2228,7 +2228,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32f32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm15
; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm14
; SSE42-NEXT: movaps {{[0-9]+}}(%rsp), %xmm13
@@ -2345,7 +2345,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm3, %ymm7, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
; AVX1-NEXT: vpackssdw %xmm7, %xmm3, %xmm3
@@ -2364,7 +2364,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcmpltps %ymm3, %ymm7, %ymm3
; AVX2-NEXT: vcmpltps %ymm2, %ymm6, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
@@ -2378,7 +2378,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %zmm1, %zmm3, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k1
; AVX512F-NEXT: kshiftrw $15, %k1, %k1
@@ -2515,7 +2515,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltps %zmm1, %zmm3, %k0
; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
@@ -2652,7 +2652,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm2, %k0
; AVX512BW-NEXT: vcmpltps %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
@@ -2665,7 +2665,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x
define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v16i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm8, %xmm7
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
@@ -2764,7 +2764,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v16i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm7
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm6
; SSE42-NEXT: packssdw %xmm7, %xmm6
@@ -2783,7 +2783,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm9
; AVX1-NEXT: vpcmpgtq %xmm8, %xmm9, %xmm8
@@ -2811,7 +2811,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm7, %ymm3, %ymm3
; AVX2-NEXT: vpcmpgtq %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
@@ -2828,7 +2828,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v16i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm0, %k0
; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512F-NEXT: kunpckbw %k0, %k1, %k1
@@ -2838,7 +2838,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v16i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %zmm2, %zmm0, %k0
; AVX512DQ-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512DQ-NEXT: kunpckbw %k0, %k1, %k0
@@ -2848,7 +2848,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v16i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
@@ -2862,7 +2862,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x
define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm2
; SSE2-NEXT: packssdw %xmm3, %xmm2
@@ -2979,7 +2979,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32i32:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm0
; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm2
@@ -3088,7 +3088,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm9
; AVX1-NEXT: vpcmpgtd %xmm8, %xmm9, %xmm8
@@ -3115,7 +3115,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm7, %ymm3, %ymm3
; AVX2-NEXT: vpcmpgtd %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
@@ -3129,7 +3129,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32i32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm1, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k1
; AVX512F-NEXT: kshiftrw $15, %k1, %k1
@@ -3266,7 +3266,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtd %zmm3, %zmm1, %k0
; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
@@ -3403,7 +3403,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
@@ -3416,7 +3416,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x
define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; SSE2-LABEL: test_cmp_v64i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm1
; SSE2-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: packsswb %xmm1, %xmm0
@@ -3629,7 +3629,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v64i16:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm0
; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm2
@@ -3834,7 +3834,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v64i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm0, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -4048,7 +4048,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v64i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX2-NEXT: vpcmpgtw %ymm5, %ymm1, %ymm1
@@ -4254,7 +4254,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v64i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm3
; AVX512F-NEXT: vpslld $31, %zmm3, %zmm3
@@ -4541,7 +4541,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v64i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm3
; AVX512DQ-NEXT: vpmovsxwd %ymm3, %zmm3
; AVX512DQ-NEXT: vpslld $31, %zmm3, %zmm3
@@ -4828,7 +4828,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v64i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtw %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckdq %k0, %k1, %k0
@@ -4840,7 +4840,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x
define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; SSE2-LABEL: test_cmp_v128i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm1
@@ -5247,7 +5247,7 @@ define <128 x i1> @test_cmp_v128i8(<128
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v128i8:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm0
; SSE42-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm1
; SSE42-NEXT: pcmpgtb {{[0-9]+}}(%rsp), %xmm2
@@ -5644,7 +5644,7 @@ define <128 x i1> @test_cmp_v128i8(<128
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v128i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtb %xmm4, %xmm0, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -6050,7 +6050,7 @@ define <128 x i1> @test_cmp_v128i8(<128
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v128i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm2
@@ -6448,7 +6448,7 @@ define <128 x i1> @test_cmp_v128i8(<128
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v128i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm2
@@ -6494,7 +6494,7 @@ define <128 x i1> @test_cmp_v128i8(<128
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v128i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm1
; AVX512DQ-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm2
@@ -6540,7 +6540,7 @@ define <128 x i1> @test_cmp_v128i8(<128
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v128i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm3, %zmm1, %k0
; AVX512BW-NEXT: vpcmpgtb %zmm2, %zmm0, %k1
; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
@@ -6556,7 +6556,7 @@ define <128 x i1> @test_cmp_v128i8(<128
define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32f64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8
; SSE2-NEXT: cmpltpd %xmm5, %xmm8
; SSE2-NEXT: movapd {{[0-9]+}}(%rsp), %xmm5
@@ -6725,7 +6725,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32f64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pushq %rbp
; SSE42-NEXT: pushq %r15
; SSE42-NEXT: pushq %r14
@@ -6904,7 +6904,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -6953,7 +6953,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -6993,7 +6993,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm2, %zmm6, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k1
; AVX512F-NEXT: kshiftrw $15, %k1, %k1
@@ -7134,7 +7134,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcmpltpd %zmm2, %zmm6, %k0
; AVX512DQ-NEXT: kshiftlb $6, %k0, %k1
; AVX512DQ-NEXT: kshiftrb $7, %k1, %k1
@@ -7271,7 +7271,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm4, %k0
; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm5, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
@@ -7288,7 +7288,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x
define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; SSE2-LABEL: test_cmp_v32i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
@@ -7622,7 +7622,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v32i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE42-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
@@ -7781,7 +7781,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v32i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
@@ -7846,7 +7846,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
@@ -7878,7 +7878,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_cmp_v32i64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm6, %zmm2, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k1
; AVX512F-NEXT: kshiftrw $15, %k1, %k1
@@ -8019,7 +8019,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: test_cmp_v32i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpcmpgtq %zmm6, %zmm2, %k0
; AVX512DQ-NEXT: kshiftlb $6, %k0, %k1
; AVX512DQ-NEXT: kshiftrb $7, %k1, %k1
@@ -8156,7 +8156,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_cmp_v32i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm4, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtq %zmm5, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
Modified: llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) nounwind {
; X32-SSE-LABEL: extract_any_extend_vector_inreg_v16i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pushl %ebp
; X32-SSE-NEXT: movl %esp, %ebp
; X32-SSE-NEXT: andl $-128, %esp
@@ -42,7 +42,7 @@ define i64 @extract_any_extend_vector_in
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: extract_any_extend_vector_inreg_v16i64:
-; X64-SSE: # BB#0:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: pushq %rbp
; X64-SSE-NEXT: movq %rsp, %rbp
; X64-SSE-NEXT: andq $-128, %rsp
@@ -65,7 +65,7 @@ define i64 @extract_any_extend_vector_in
; X64-SSE-NEXT: retq
;
; X32-AVX-LABEL: extract_any_extend_vector_inreg_v16i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: pushl %ebp
; X32-AVX-NEXT: movl %esp, %ebp
; X32-AVX-NEXT: andl $-128, %esp
@@ -94,7 +94,7 @@ define i64 @extract_any_extend_vector_in
; X32-AVX-NEXT: retl
;
; X64-AVX-LABEL: extract_any_extend_vector_inreg_v16i64:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: pushq %rbp
; X64-AVX-NEXT: movq %rsp, %rbp
; X64-AVX-NEXT: andq $-128, %rsp
Modified: llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll Mon Dec 4 09:18:51 2017
@@ -10,7 +10,7 @@
define float @cvt_i16_to_f32(i16 %a0) nounwind {
; ALL-LABEL: cvt_i16_to_f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl %di, %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -22,7 +22,7 @@ define float @cvt_i16_to_f32(i16 %a0) no
define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_4i16_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: movq %rax, %rcx
@@ -49,7 +49,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4i16_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: movq %rax, %rcx
@@ -76,7 +76,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4i16_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: movq %rax, %rcx
@@ -103,7 +103,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4i16_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; AVX512VL-NEXT: movq %rax, %rcx
@@ -135,7 +135,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4
define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_8i16_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
@@ -161,7 +161,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8i16_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
@@ -187,7 +187,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_8i16_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
@@ -213,7 +213,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_8i16_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
@@ -247,7 +247,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8
define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) nounwind {
; ALL-LABEL: cvt_8i16_to_8f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpextrq $1, %xmm0, %rdx
; ALL-NEXT: movq %rdx, %r8
; ALL-NEXT: movq %rdx, %r10
@@ -301,7 +301,7 @@ define <8 x float> @cvt_8i16_to_8f32(<8
define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_16i16_to_16f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vmovq %xmm4, %rax
; AVX1-NEXT: movq %rax, %rcx
@@ -396,7 +396,7 @@ define <16 x float> @cvt_16i16_to_16f32(
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_16i16_to_16f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX2-NEXT: vmovq %xmm4, %rax
; AVX2-NEXT: movq %rax, %rcx
@@ -491,7 +491,7 @@ define <16 x float> @cvt_16i16_to_16f32(
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_16i16_to_16f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm10
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: movq %rax, %rcx
@@ -587,7 +587,7 @@ define <16 x float> @cvt_16i16_to_16f32(
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_16i16_to_16f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm10
; AVX512VL-NEXT: vmovq %xmm0, %rax
; AVX512VL-NEXT: movq %rax, %rcx
@@ -692,7 +692,7 @@ define <16 x float> @cvt_16i16_to_16f32(
define float @load_cvt_i16_to_f32(i16* %a0) nounwind {
; ALL-LABEL: load_cvt_i16_to_f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -705,7 +705,7 @@ define float @load_cvt_i16_to_f32(i16* %
define <4 x float> @load_cvt_4i16_to_4f32(<4 x i16>* %a0) nounwind {
; ALL-LABEL: load_cvt_4i16_to_4f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl 6(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -730,7 +730,7 @@ define <4 x float> @load_cvt_4i16_to_4f3
define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
; AVX1-LABEL: load_cvt_8i16_to_4f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movq (%rdi), %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
@@ -756,7 +756,7 @@ define <4 x float> @load_cvt_8i16_to_4f3
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_8i16_to_4f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movq (%rdi), %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
@@ -782,7 +782,7 @@ define <4 x float> @load_cvt_8i16_to_4f3
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_cvt_8i16_to_4f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movq (%rdi), %rax
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
@@ -808,7 +808,7 @@ define <4 x float> @load_cvt_8i16_to_4f3
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_8i16_to_4f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
@@ -843,7 +843,7 @@ define <4 x float> @load_cvt_8i16_to_4f3
define <8 x float> @load_cvt_8i16_to_8f32(<8 x i16>* %a0) nounwind {
; ALL-LABEL: load_cvt_8i16_to_8f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl 6(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -884,7 +884,7 @@ define <8 x float> @load_cvt_8i16_to_8f3
define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
; AVX1-LABEL: load_cvt_16i16_to_16f32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movswl 22(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm8
@@ -950,7 +950,7 @@ define <16 x float> @load_cvt_16i16_to_1
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_16i16_to_16f32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movswl 22(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm8
@@ -1016,7 +1016,7 @@ define <16 x float> @load_cvt_16i16_to_1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_cvt_16i16_to_16f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movswl 6(%rdi), %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm8
@@ -1083,7 +1083,7 @@ define <16 x float> @load_cvt_16i16_to_1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_16i16_to_16f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: movswl 6(%rdi), %eax
; AVX512VL-NEXT: vmovd %eax, %xmm0
; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm8
@@ -1160,7 +1160,7 @@ define <16 x float> @load_cvt_16i16_to_1
define double @cvt_i16_to_f64(i16 %a0) nounwind {
; ALL-LABEL: cvt_i16_to_f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl %di, %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -1173,7 +1173,7 @@ define double @cvt_i16_to_f64(i16 %a0) n
define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_2i16_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: vmovd %xmm0, %eax
@@ -1190,7 +1190,7 @@ define <2 x double> @cvt_2i16_to_2f64(<2
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_2i16_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: vmovd %xmm0, %eax
@@ -1207,7 +1207,7 @@ define <2 x double> @cvt_2i16_to_2f64(<2
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_2i16_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX512F-NEXT: vmovd %xmm0, %eax
@@ -1224,7 +1224,7 @@ define <2 x double> @cvt_2i16_to_2f64(<2
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_2i16_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovqw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; AVX512VL-NEXT: movswl %ax, %ecx
@@ -1245,7 +1245,7 @@ define <2 x double> @cvt_2i16_to_2f64(<2
define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_4i16_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: movq %rax, %rcx
@@ -1275,7 +1275,7 @@ define <4 x double> @cvt_4i16_to_4f64(<4
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4i16_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: movq %rax, %rcx
@@ -1305,7 +1305,7 @@ define <4 x double> @cvt_4i16_to_4f64(<4
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4i16_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: movq %rax, %rcx
@@ -1335,7 +1335,7 @@ define <4 x double> @cvt_4i16_to_4f64(<4
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4i16_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; AVX512VL-NEXT: movq %rax, %rcx
@@ -1370,7 +1370,7 @@ define <4 x double> @cvt_4i16_to_4f64(<4
define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_8i16_to_2f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: movswl %ax, %ecx
; AVX1-NEXT: shrl $16, %eax
@@ -1385,7 +1385,7 @@ define <2 x double> @cvt_8i16_to_2f64(<8
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8i16_to_2f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: movswl %ax, %ecx
; AVX2-NEXT: shrl $16, %eax
@@ -1400,7 +1400,7 @@ define <2 x double> @cvt_8i16_to_2f64(<8
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_8i16_to_2f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovd %xmm0, %eax
; AVX512F-NEXT: movswl %ax, %ecx
; AVX512F-NEXT: shrl $16, %eax
@@ -1415,7 +1415,7 @@ define <2 x double> @cvt_8i16_to_2f64(<8
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_8i16_to_2f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX512VL-NEXT: vpmovqw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movl -{{[0-9]+}}(%rsp), %eax
@@ -1438,7 +1438,7 @@ define <2 x double> @cvt_8i16_to_2f64(<8
define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_8i16_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movl %eax, %edx
@@ -1467,7 +1467,7 @@ define <4 x double> @cvt_8i16_to_4f64(<8
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8i16_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movl %eax, %edx
@@ -1496,7 +1496,7 @@ define <4 x double> @cvt_8i16_to_4f64(<8
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_8i16_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovq %xmm0, %rax
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movl %eax, %edx
@@ -1525,7 +1525,7 @@ define <4 x double> @cvt_8i16_to_4f64(<8
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_8i16_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
@@ -1562,7 +1562,7 @@ define <4 x double> @cvt_8i16_to_4f64(<8
define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
; AVX1-LABEL: cvt_8i16_to_8f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %xmm0, %rdx
; AVX1-NEXT: movq %rdx, %r9
; AVX1-NEXT: movl %edx, %r10d
@@ -1616,7 +1616,7 @@ define <8 x double> @cvt_8i16_to_8f64(<8
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8i16_to_8f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %xmm0, %rdx
; AVX2-NEXT: movq %rdx, %r9
; AVX2-NEXT: movl %edx, %r10d
@@ -1670,7 +1670,7 @@ define <8 x double> @cvt_8i16_to_8f64(<8
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_8i16_to_8f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
; AVX512-NEXT: movq %rdx, %r9
; AVX512-NEXT: movl %edx, %r10d
@@ -1734,7 +1734,7 @@ define <8 x double> @cvt_8i16_to_8f64(<8
define double @load_cvt_i16_to_f64(i16* %a0) nounwind {
; ALL-LABEL: load_cvt_i16_to_f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -1748,7 +1748,7 @@ define double @load_cvt_i16_to_f64(i16*
define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) nounwind {
; ALL-LABEL: load_cvt_2i16_to_2f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -1767,7 +1767,7 @@ define <2 x double> @load_cvt_2i16_to_2f
define <4 x double> @load_cvt_4i16_to_4f64(<4 x i16>* %a0) nounwind {
; ALL-LABEL: load_cvt_4i16_to_4f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -1796,7 +1796,7 @@ define <4 x double> @load_cvt_4i16_to_4f
define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind {
; AVX1-LABEL: load_cvt_8i16_to_4f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movq (%rdi), %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movl %eax, %edx
@@ -1825,7 +1825,7 @@ define <4 x double> @load_cvt_8i16_to_4f
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_8i16_to_4f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movq (%rdi), %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movl %eax, %edx
@@ -1854,7 +1854,7 @@ define <4 x double> @load_cvt_8i16_to_4f
; AVX2-NEXT: retq
;
; AVX512F-LABEL: load_cvt_8i16_to_4f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: movq (%rdi), %rax
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movl %eax, %edx
@@ -1883,7 +1883,7 @@ define <4 x double> @load_cvt_8i16_to_4f
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: load_cvt_8i16_to_4f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX512VL-NEXT: vpmovdw %xmm0, -{{[0-9]+}}(%rsp)
; AVX512VL-NEXT: movq -{{[0-9]+}}(%rsp), %rax
@@ -1921,7 +1921,7 @@ define <4 x double> @load_cvt_8i16_to_4f
define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind {
; AVX1-LABEL: load_cvt_8i16_to_8f64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: movswl 8(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm1
@@ -1963,7 +1963,7 @@ define <8 x double> @load_cvt_8i16_to_8f
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_8i16_to_8f64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: movswl 8(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
@@ -2005,7 +2005,7 @@ define <8 x double> @load_cvt_8i16_to_8f
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_cvt_8i16_to_8f64:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: movswl (%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
@@ -2058,7 +2058,7 @@ define <8 x double> @load_cvt_8i16_to_8f
define i16 @cvt_f32_to_i16(float %a0) nounwind {
; ALL-LABEL: cvt_f32_to_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
@@ -2070,7 +2070,7 @@ define i16 @cvt_f32_to_i16(float %a0) no
define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) nounwind {
; ALL-LABEL: cvt_4f32_to_4i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
@@ -2099,7 +2099,7 @@ define <4 x i16> @cvt_4f32_to_4i16(<4 x
define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
; AVX1-LABEL: cvt_4f32_to_8i16_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %eax
@@ -2124,7 +2124,7 @@ define <8 x i16> @cvt_4f32_to_8i16_undef
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f32_to_8i16_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
@@ -2149,7 +2149,7 @@ define <8 x i16> @cvt_4f32_to_8i16_undef
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4f32_to_8i16_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512F-NEXT: vmovd %xmm1, %eax
@@ -2174,7 +2174,7 @@ define <8 x i16> @cvt_4f32_to_8i16_undef
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f32_to_8i16_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512VL-NEXT: vmovd %xmm1, %eax
@@ -2206,7 +2206,7 @@ define <8 x i16> @cvt_4f32_to_8i16_undef
define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
; AVX1-LABEL: cvt_4f32_to_8i16_zero:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %eax
@@ -2231,7 +2231,7 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f32_to_8i16_zero:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
@@ -2256,7 +2256,7 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4f32_to_8i16_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512F-NEXT: vmovd %xmm1, %eax
@@ -2281,7 +2281,7 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f32_to_8i16_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512VL-NEXT: vmovd %xmm1, %eax
@@ -2315,7 +2315,7 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(
define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind {
; ALL-LABEL: cvt_8f32_to_8i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
@@ -2367,7 +2367,7 @@ define <8 x i16> @cvt_8f32_to_8i16(<8 x
define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind {
; AVX1-LABEL: cvt_16f32_to_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm2
; AVX1-NEXT: vmovd %xmm2, %eax
; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
@@ -2434,7 +2434,7 @@ define <16 x i16> @cvt_16f32_to_16i16(<1
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_16f32_to_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm2
; AVX2-NEXT: vmovd %xmm2, %eax
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
@@ -2501,7 +2501,7 @@ define <16 x i16> @cvt_16f32_to_16i16(<1
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_16f32_to_16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm2
; AVX512-NEXT: vmovd %xmm2, %eax
@@ -2578,7 +2578,7 @@ define <16 x i16> @cvt_16f32_to_16i16(<1
define void @store_cvt_f32_to_i16(float %a0, i16* %a1) nounwind {
; ALL-LABEL: store_cvt_f32_to_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: movw %ax, (%rdi)
@@ -2591,7 +2591,7 @@ define void @store_cvt_f32_to_i16(float
define void @store_cvt_4f32_to_4i16(<4 x float> %a0, <4 x i16>* %a1) nounwind {
; ALL-LABEL: store_cvt_4f32_to_4i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
@@ -2616,7 +2616,7 @@ define void @store_cvt_4f32_to_4i16(<4 x
define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_4f32_to_8i16_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %eax
@@ -2642,7 +2642,7 @@ define void @store_cvt_4f32_to_8i16_unde
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f32_to_8i16_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
@@ -2668,7 +2668,7 @@ define void @store_cvt_4f32_to_8i16_unde
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_cvt_4f32_to_8i16_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512F-NEXT: vmovd %xmm1, %eax
@@ -2694,7 +2694,7 @@ define void @store_cvt_4f32_to_8i16_unde
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f32_to_8i16_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512VL-NEXT: vmovd %xmm1, %eax
@@ -2728,7 +2728,7 @@ define void @store_cvt_4f32_to_8i16_unde
define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_4f32_to_8i16_zero:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %eax
@@ -2754,7 +2754,7 @@ define void @store_cvt_4f32_to_8i16_zero
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f32_to_8i16_zero:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
@@ -2780,7 +2780,7 @@ define void @store_cvt_4f32_to_8i16_zero
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_cvt_4f32_to_8i16_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512F-NEXT: vmovd %xmm1, %eax
@@ -2806,7 +2806,7 @@ define void @store_cvt_4f32_to_8i16_zero
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f32_to_8i16_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512VL-NEXT: vmovd %xmm1, %eax
@@ -2842,7 +2842,7 @@ define void @store_cvt_4f32_to_8i16_zero
define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) nounwind {
; ALL-LABEL: store_cvt_8f32_to_8i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %r8d
@@ -2884,7 +2884,7 @@ define void @store_cvt_8f32_to_8i16(<8 x
define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_16f32_to_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm4
@@ -2951,7 +2951,7 @@ define void @store_cvt_16f32_to_16i16(<1
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_16f32_to_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm4
@@ -3018,7 +3018,7 @@ define void @store_cvt_16f32_to_16i16(<1
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_16f32_to_16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; AVX512-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -3096,7 +3096,7 @@ define void @store_cvt_16f32_to_16i16(<1
define i16 @cvt_f64_to_i16(double %a0) nounwind {
; ALL-LABEL: cvt_f64_to_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: jmp __truncdfhf2 # TAILCALL
%1 = fptrunc double %a0 to half
%2 = bitcast half %1 to i16
@@ -3105,7 +3105,7 @@ define i16 @cvt_f64_to_i16(double %a0) n
define <2 x i16> @cvt_2f64_to_2i16(<2 x double> %a0) nounwind {
; ALL-LABEL: cvt_2f64_to_2i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbx
; ALL-NEXT: subq $16, %rsp
; ALL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
@@ -3128,7 +3128,7 @@ define <2 x i16> @cvt_2f64_to_2i16(<2 x
define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX1-LABEL: cvt_4f64_to_4i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $40, %rsp
@@ -3165,7 +3165,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_4i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $40, %rsp
@@ -3202,7 +3202,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_4f64_to_4i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $40, %rsp
@@ -3244,7 +3244,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x
define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX1-LABEL: cvt_4f64_to_8i16_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $40, %rsp
@@ -3282,7 +3282,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_8i16_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $40, %rsp
@@ -3320,7 +3320,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4f64_to_8i16_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %r14
; AVX512F-NEXT: pushq %rbx
; AVX512F-NEXT: subq $40, %rsp
@@ -3358,7 +3358,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f64_to_8i16_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %r14
; AVX512VL-NEXT: pushq %rbx
; AVX512VL-NEXT: subq $40, %rsp
@@ -3403,7 +3403,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef
define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX1-LABEL: cvt_4f64_to_8i16_zero:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $40, %rsp
@@ -3441,7 +3441,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_8i16_zero:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $40, %rsp
@@ -3479,7 +3479,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(
; AVX2-NEXT: retq
;
; AVX512F-LABEL: cvt_4f64_to_8i16_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %r14
; AVX512F-NEXT: pushq %rbx
; AVX512F-NEXT: subq $40, %rsp
@@ -3517,7 +3517,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: cvt_4f64_to_8i16_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %r14
; AVX512VL-NEXT: pushq %rbx
; AVX512VL-NEXT: subq $40, %rsp
@@ -3564,7 +3564,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(
define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX1-LABEL: cvt_8f64_to_8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
@@ -3631,7 +3631,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8f64_to_8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
@@ -3698,7 +3698,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_8f64_to_8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
@@ -3776,7 +3776,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x
define void @store_cvt_f64_to_i16(double %a0, i16* %a1) nounwind {
; ALL-LABEL: store_cvt_f64_to_i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbx
; ALL-NEXT: movq %rdi, %rbx
; ALL-NEXT: callq __truncdfhf2
@@ -3791,7 +3791,7 @@ define void @store_cvt_f64_to_i16(double
define void @store_cvt_2f64_to_2i16(<2 x double> %a0, <2 x i16>* %a1) nounwind {
; ALL-LABEL: store_cvt_2f64_to_2i16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: pushq %rbp
; ALL-NEXT: pushq %rbx
; ALL-NEXT: subq $24, %rsp
@@ -3816,7 +3816,7 @@ define void @store_cvt_2f64_to_2i16(<2 x
define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_4f64_to_4i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
@@ -3854,7 +3854,7 @@ define void @store_cvt_4f64_to_4i16(<4 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_4i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
@@ -3892,7 +3892,7 @@ define void @store_cvt_4f64_to_4i16(<4 x
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f64_to_4i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: pushq %r14
@@ -3936,7 +3936,7 @@ define void @store_cvt_4f64_to_4i16(<4 x
define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
@@ -3978,7 +3978,7 @@ define void @store_cvt_4f64_to_8i16_unde
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
@@ -4020,7 +4020,7 @@ define void @store_cvt_4f64_to_8i16_unde
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: pushq %r14
; AVX512F-NEXT: pushq %rbx
@@ -4062,7 +4062,7 @@ define void @store_cvt_4f64_to_8i16_unde
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f64_to_8i16_undef:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %rbp
; AVX512VL-NEXT: pushq %r14
; AVX512VL-NEXT: pushq %rbx
@@ -4112,7 +4112,7 @@ define void @store_cvt_4f64_to_8i16_unde
define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %rbx
@@ -4154,7 +4154,7 @@ define void @store_cvt_4f64_to_8i16_zero
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
@@ -4196,7 +4196,7 @@ define void @store_cvt_4f64_to_8i16_zero
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: pushq %r14
; AVX512F-NEXT: pushq %rbx
@@ -4238,7 +4238,7 @@ define void @store_cvt_4f64_to_8i16_zero
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: store_cvt_4f64_to_8i16_zero:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %rbp
; AVX512VL-NEXT: pushq %r14
; AVX512VL-NEXT: pushq %rbx
@@ -4290,7 +4290,7 @@ define void @store_cvt_4f64_to_8i16_zero
define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-LABEL: store_cvt_8f64_to_8i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
@@ -4358,7 +4358,7 @@ define void @store_cvt_8f64_to_8i16(<8 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_8f64_to_8i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
@@ -4426,7 +4426,7 @@ define void @store_cvt_8f64_to_8i16(<8 x
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_8f64_to_8i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: pushq %r14
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll Mon Dec 4 09:18:51 2017
@@ -11,7 +11,7 @@
define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_div7_2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
; SSE2-NEXT: imulq %rcx
@@ -33,7 +33,7 @@ define <2 x i64> @test_div7_2i64(<2 x i6
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
; SSE41-NEXT: imulq %rcx
@@ -53,7 +53,7 @@ define <2 x i64> @test_div7_2i64(<2 x i6
; SSE41-NEXT: retq
;
; AVX-LABEL: test_div7_2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rax
; AVX-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
; AVX-NEXT: imulq %rcx
@@ -77,7 +77,7 @@ define <2 x i64> @test_div7_2i64(<2 x i6
define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_div7_4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
@@ -103,7 +103,7 @@ define <4 x i32> @test_div7_4i32(<4 x i3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -120,7 +120,7 @@ define <4 x i32> @test_div7_4i32(<4 x i3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_div7_4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -135,7 +135,7 @@ define <4 x i32> @test_div7_4i32(<4 x i3
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -154,7 +154,7 @@ define <4 x i32> @test_div7_4i32(<4 x i3
define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: test_div7_8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pmulhw {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $15, %xmm1
@@ -163,7 +163,7 @@ define <8 x i16> @test_div7_8i16(<8 x i1
; SSE-NEXT: retq
;
; AVX-LABEL: test_div7_8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlw $15, %xmm0, %xmm1
; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
@@ -175,7 +175,7 @@ define <8 x i16> @test_div7_8i16(<8 x i1
define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_div7_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
@@ -202,7 +202,7 @@ define <16 x i8> @test_div7_16i8(<16 x i
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE41-NEXT: pmullw %xmm2, %xmm1
@@ -226,7 +226,7 @@ define <16 x i8> @test_div7_16i8(<16 x i
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_div7_16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -248,7 +248,7 @@ define <16 x i8> @test_div7_16i8(<16 x i
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_div7_16i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -267,7 +267,7 @@ define <16 x i8> @test_div7_16i8(<16 x i
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_div7_16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -293,7 +293,7 @@ define <16 x i8> @test_div7_16i8(<16 x i
define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_rem7_2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rcx
; SSE2-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
; SSE2-NEXT: movq %rcx, %rax
@@ -323,7 +323,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i6
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rcx
; SSE41-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
; SSE41-NEXT: movq %rcx, %rax
@@ -351,7 +351,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i6
; SSE41-NEXT: retq
;
; AVX-LABEL: test_rem7_2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rcx
; AVX-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
; AVX-NEXT: movq %rcx, %rax
@@ -383,7 +383,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i6
define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_rem7_4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
@@ -416,7 +416,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -434,7 +434,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_rem7_4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -451,7 +451,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i3
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -473,7 +473,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i3
define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: test_rem7_8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [18725,18725,18725,18725,18725,18725,18725,18725]
; SSE-NEXT: pmulhw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
@@ -485,7 +485,7 @@ define <8 x i16> @test_rem7_8i16(<8 x i1
; SSE-NEXT: retq
;
; AVX-LABEL: test_rem7_8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vpsrlw $15, %xmm1, %xmm2
; AVX-NEXT: vpsraw $1, %xmm1, %xmm1
@@ -499,7 +499,7 @@ define <8 x i16> @test_rem7_8i16(<8 x i1
define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_rem7_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: psraw $8, %xmm2
@@ -538,7 +538,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; SSE41-NEXT: pmullw %xmm2, %xmm1
@@ -572,7 +572,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_rem7_16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -605,7 +605,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_16i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -632,7 +632,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
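
(Aside on the constant these sdiv tests keep pinning down: 5270498306774157605 (0x4924924924924925) is the signed magic number for division by 7, chosen so that M*7 == 2^65 + 3; the imulq high half is then roughly 2n/7 and two shifts finish the job. A C sketch of the identity, assuming __int128 support and arithmetic right shift on signed types, as on every mainstream x86-64 compiler; sdiv7 is an illustrative name, not anything from the tree:)

  #include <assert.h>
  #include <stdint.h>

  /* Signed n/7 via multiply-high, mirroring the movabsq/imulq/sarq/shrq
     sequence in test_div7_2i64 above. M*7 == 2^65 + 3. */
  static int64_t sdiv7(int64_t n) {
      const int64_t M = 5270498306774157605LL;         /* 0x4924924924924925 */
      int64_t hi = (int64_t)(((__int128)n * M) >> 64); /* imulq: high half */
      int64_t q  = hi >> 1;                            /* sarq: ~n/7, floor */
      q += (uint64_t)hi >> 63;          /* shrq $63; addq: round negative
                                           quotients toward zero */
      return q;
  }

  int main(void) {
      for (int64_t n = -100000; n <= 100000; ++n)
          assert(sdiv7(n) == n / 7);
      return 0;
  }

The vector versions above compute the same thing; they scalarize through GPRs because SSE/AVX lack a full 64x64 multiply-high.
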
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll Mon Dec 4 09:18:51 2017
@@ -9,7 +9,7 @@
define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: test_div7_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
; AVX1-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
@@ -46,7 +46,7 @@ define <4 x i64> @test_div7_4i64(<4 x i6
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
; AVX2-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
@@ -87,7 +87,7 @@ define <4 x i64> @test_div7_4i64(<4 x i6
define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_div7_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
@@ -115,7 +115,7 @@ define <8 x i32> @test_div7_8i32(<8 x i3
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
@@ -134,7 +134,7 @@ define <8 x i32> @test_div7_8i32(<8 x i3
define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: test_div7_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725]
; AVX1-NEXT: vpmulhw %xmm2, %xmm1, %xmm1
@@ -149,7 +149,7 @@ define <16 x i16> @test_div7_16i16(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmulhw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm1
; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
@@ -161,7 +161,7 @@ define <16 x i16> @test_div7_16i16(<16 x
define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_div7_32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmovsxbw %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
@@ -203,7 +203,7 @@ define <32 x i8> @test_div7_32i8(<32 x i
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_div7_32i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
@@ -227,7 +227,7 @@ define <32 x i8> @test_div7_32i8(<32 x i
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_div7_32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
@@ -252,7 +252,7 @@ define <32 x i8> @test_div7_32i8(<32 x i
define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: test_rem7_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
; AVX1-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
@@ -305,7 +305,7 @@ define <4 x i64> @test_rem7_4i64(<4 x i6
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
; AVX2-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
@@ -362,7 +362,7 @@ define <4 x i64> @test_rem7_4i64(<4 x i6
define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_rem7_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
@@ -395,7 +395,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i3
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
@@ -417,7 +417,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i3
define <16 x i16> @test_rem7_16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: test_rem7_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725]
; AVX1-NEXT: vpmulhw %xmm2, %xmm1, %xmm3
@@ -437,7 +437,7 @@ define <16 x i16> @test_rem7_16i16(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmulhw {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsrlw $15, %ymm1, %ymm2
; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
@@ -451,7 +451,7 @@ define <16 x i16> @test_rem7_16i16(<16 x
define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_rem7_32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovsxbw %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [65427,65427,65427,65427,65427,65427,65427,65427]
@@ -513,7 +513,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_32i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
@@ -554,7 +554,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-512.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-512.ll Mon Dec 4 09:18:51 2017
@@ -8,7 +8,7 @@
define <8 x i64> @test_div7_8i64(<8 x i64> %a) nounwind {
; AVX-LABEL: test_div7_8i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX-NEXT: vpextrq $1, %xmm1, %rax
; AVX-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
@@ -83,7 +83,7 @@ define <8 x i64> @test_div7_8i64(<8 x i6
define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-LABEL: test_div7_16i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} zmm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX-NEXT: vpmuldq %zmm1, %zmm0, %zmm2
; AVX-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -102,7 +102,7 @@ define <16 x i32> @test_div7_16i32(<16 x
define <32 x i16> @test_div7_32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: test_div7_32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725]
; AVX512F-NEXT: vpmulhw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $15, %ymm0, %ymm3
@@ -115,7 +115,7 @@ define <32 x i16> @test_div7_32i16(<32 x
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_div7_32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmulhw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $15, %zmm0, %zmm1
; AVX512BW-NEXT: vpsraw $1, %zmm0, %zmm0
@@ -127,7 +127,7 @@ define <32 x i16> @test_div7_32i16(<32 x
define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: test_div7_64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
@@ -171,7 +171,7 @@ define <64 x i8> @test_div7_64i8(<64 x i
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_div7_64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
@@ -203,7 +203,7 @@ define <64 x i8> @test_div7_64i8(<64 x i
define <8 x i64> @test_rem7_8i64(<8 x i64> %a) nounwind {
; AVX-LABEL: test_rem7_8i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX-NEXT: vpextrq $1, %xmm1, %rcx
; AVX-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
@@ -310,7 +310,7 @@ define <8 x i64> @test_rem7_8i64(<8 x i6
define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-LABEL: test_rem7_16i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} zmm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
; AVX-NEXT: vpmuldq %zmm1, %zmm0, %zmm2
; AVX-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -331,7 +331,7 @@ define <16 x i32> @test_rem7_16i32(<16 x
define <32 x i16> @test_rem7_32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: test_rem7_32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725,18725]
; AVX512F-NEXT: vpmulhw %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vpsrlw $15, %ymm3, %ymm4
@@ -349,7 +349,7 @@ define <32 x i16> @test_rem7_32i16(<32 x
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmulhw {{.*}}(%rip), %zmm0, %zmm1
; AVX512BW-NEXT: vpsrlw $15, %zmm1, %zmm2
; AVX512BW-NEXT: vpsraw $1, %zmm1, %zmm1
@@ -363,7 +363,7 @@ define <32 x i16> @test_rem7_32i16(<32 x
define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: test_rem7_64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
@@ -430,7 +430,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll Mon Dec 4 09:18:51 2017
@@ -11,7 +11,7 @@
define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_div7_2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rcx
; SSE2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; SSE2-NEXT: movq %rcx, %rax
@@ -35,7 +35,7 @@ define <2 x i64> @test_div7_2i64(<2 x i6
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rcx
; SSE41-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; SSE41-NEXT: movq %rcx, %rax
@@ -57,7 +57,7 @@ define <2 x i64> @test_div7_2i64(<2 x i6
; SSE41-NEXT: retq
;
; AVX-LABEL: test_div7_2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rcx
; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; AVX-NEXT: movq %rcx, %rax
@@ -83,7 +83,7 @@ define <2 x i64> @test_div7_2i64(<2 x i6
define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_div7_4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pmuludq %xmm1, %xmm2
@@ -100,7 +100,7 @@ define <4 x i32> @test_div7_4i32(<4 x i3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -115,7 +115,7 @@ define <4 x i32> @test_div7_4i32(<4 x i3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_div7_4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -130,7 +130,7 @@ define <4 x i32> @test_div7_4i32(<4 x i3
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -149,7 +149,7 @@ define <4 x i32> @test_div7_4i32(<4 x i3
define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: test_div7_8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
; SSE-NEXT: pmulhuw %xmm0, %xmm1
; SSE-NEXT: psubw %xmm1, %xmm0
@@ -159,7 +159,7 @@ define <8 x i16> @test_div7_8i16(<8 x i1
; SSE-NEXT: retq
;
; AVX-LABEL: test_div7_8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
@@ -172,7 +172,7 @@ define <8 x i16> @test_div7_8i16(<8 x i1
define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_div7_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -193,7 +193,7 @@ define <16 x i8> @test_div7_16i8(<16 x i
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_div7_16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; SSE41-NEXT: pmullw %xmm2, %xmm1
@@ -212,7 +212,7 @@ define <16 x i8> @test_div7_16i8(<16 x i
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_div7_16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -231,7 +231,7 @@ define <16 x i8> @test_div7_16i8(<16 x i
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_div7_16i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -247,7 +247,7 @@ define <16 x i8> @test_div7_16i8(<16 x i
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_div7_16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -270,7 +270,7 @@ define <16 x i8> @test_div7_16i8(<16 x i
define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_rem7_2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movq %xmm0, %rcx
; SSE2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; SSE2-NEXT: movq %rcx, %rax
@@ -302,7 +302,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i6
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pextrq $1, %xmm0, %rcx
; SSE41-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; SSE41-NEXT: movq %rcx, %rax
@@ -332,7 +332,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i6
; SSE41-NEXT: retq
;
; AVX-LABEL: test_rem7_2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpextrq $1, %xmm0, %rcx
; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
; AVX-NEXT: movq %rcx, %rax
@@ -366,7 +366,7 @@ define <2 x i64> @test_rem7_2i64(<2 x i6
define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_rem7_4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pmuludq %xmm1, %xmm2
@@ -392,7 +392,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -410,7 +410,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_rem7_4i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -427,7 +427,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i3
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_4i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -449,7 +449,7 @@ define <4 x i32> @test_rem7_4i32(<4 x i3
define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: test_rem7_8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
; SSE-NEXT: pmulhuw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
@@ -462,7 +462,7 @@ define <8 x i16> @test_rem7_8i16(<8 x i1
; SSE-NEXT: retq
;
; AVX-LABEL: test_rem7_8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpsrlw $1, %xmm2, %xmm2
@@ -477,7 +477,7 @@ define <8 x i16> @test_rem7_8i16(<8 x i1
define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_rem7_16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
@@ -512,7 +512,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; SSE41-NEXT: pmullw %xmm2, %xmm1
@@ -543,7 +543,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_rem7_16i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -573,7 +573,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_16i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
@@ -597,7 +597,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_16i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
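
(The unsigned tests use 2635249153387078803 (0x2492492492492493) instead, i.e. M*7 == 2^64 + 5. Because the exact multiplier does not fit in 64 bits, the lowering needs the subq/shrq/addq fixup visible above. A C sketch, again assuming __int128 support; udiv7 is an illustrative name:)

  #include <assert.h>
  #include <stdint.h>

  /* Unsigned n/7, mirroring the mulq/subq/shrq/addq/shrq sequence in
     test_div7_2i64 above. M*7 == 2^64 + 5. */
  static uint64_t udiv7(uint64_t n) {
      const uint64_t M = 0x2492492492492493ULL;
      uint64_t hi = (uint64_t)(((unsigned __int128)n * M) >> 64); /* mulq */
      uint64_t t  = (n - hi) >> 1;   /* subq; shrq: recover the bit the
                                        truncated multiplier lost */
      return (t + hi) >> 2;          /* addq; shrq $2 */
  }

  int main(void) {
      assert(udiv7(0) == 0 && udiv7(6) == 0 && udiv7(7) == 1);
      assert(udiv7(UINT64_MAX) == UINT64_MAX / 7);
      return 0;
  }
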
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll Mon Dec 4 09:18:51 2017
@@ -9,7 +9,7 @@
define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: test_div7_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
; AVX1-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -50,7 +50,7 @@ define <4 x i64> @test_div7_4i64(<4 x i6
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
; AVX2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -95,7 +95,7 @@ define <4 x i64> @test_div7_4i64(<4 x i6
define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_div7_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -123,7 +123,7 @@ define <8 x i32> @test_div7_8i32(<8 x i3
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
@@ -142,7 +142,7 @@ define <8 x i32> @test_div7_8i32(<8 x i3
define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: test_div7_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
; AVX1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm3
@@ -159,7 +159,7 @@ define <16 x i16> @test_div7_16i16(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
@@ -172,7 +172,7 @@ define <16 x i16> @test_div7_16i16(<16 x
define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_div7_32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [37,37,37,37,37,37,37,37]
@@ -209,7 +209,7 @@ define <32 x i8> @test_div7_32i8(<32 x i
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_div7_32i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
@@ -230,7 +230,7 @@ define <32 x i8> @test_div7_32i8(<32 x i
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_div7_32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
@@ -252,7 +252,7 @@ define <32 x i8> @test_div7_32i8(<32 x i
define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
; AVX1-LABEL: test_rem7_4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
; AVX1-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -309,7 +309,7 @@ define <4 x i64> @test_rem7_4i64(<4 x i6
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
; AVX2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -370,7 +370,7 @@ define <4 x i64> @test_rem7_4i64(<4 x i6
define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_rem7_8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
@@ -403,7 +403,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i3
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
@@ -425,7 +425,7 @@ define <8 x i32> @test_rem7_8i32(<8 x i3
define <16 x i16> @test_rem7_16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: test_rem7_16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9363,9363,9363,9363,9363,9363,9363,9363]
; AVX1-NEXT: vpmulhuw %xmm2, %xmm1, %xmm3
@@ -447,7 +447,7 @@ define <16 x i16> @test_rem7_16i16(<16 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm2
@@ -462,7 +462,7 @@ define <16 x i16> @test_rem7_16i16(<16 x
define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_rem7_32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37]
@@ -519,7 +519,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_32i8:
-; AVX2NOBW: # BB#0:
+; AVX2NOBW: # %bb.0:
; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
@@ -557,7 +557,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_32i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
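
(For the i8 cases the multiplier drops to 37, with 37*7 == 256 + 3, and everything is widened to 16 bits first (vpmovzxbw / vpmullw / vpsrlw $8 above) because x86 has no byte-granularity multiply-high. The same per-lane arithmetic in C, with udiv7_u8 as an illustrative name:)

  #include <assert.h>
  #include <stdint.h>

  /* Unsigned 8-bit n/7 the way the vector code above does it: widen to
     16 bits, multiply by 37, keep the high byte, then the usual fixup. */
  static uint8_t udiv7_u8(uint8_t n) {
      uint8_t hi = (uint8_t)((n * 37) >> 8); /* pmullw; psrlw $8 */
      uint8_t t  = (uint8_t)(n - hi) >> 1;   /* psubb; shift-and-mask */
      return (uint8_t)(t + hi) >> 2;         /* paddb; shift-and-mask */
  }

  int main(void) {
      for (int n = 0; n < 256; ++n)
          assert(udiv7_u8((uint8_t)n) == n / 7);
      return 0;
  }
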
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-512.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-512.ll Mon Dec 4 09:18:51 2017
@@ -8,7 +8,7 @@
define <8 x i64> @test_div7_8i64(<8 x i64> %a) nounwind {
; AVX-LABEL: test_div7_8i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX-NEXT: vpextrq $1, %xmm1, %rcx
; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -91,7 +91,7 @@ define <8 x i64> @test_div7_8i64(<8 x i6
define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-LABEL: test_div7_16i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} zmm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX-NEXT: vpmuludq %zmm1, %zmm0, %zmm2
; AVX-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -110,7 +110,7 @@ define <16 x i32> @test_div7_16i32(<16 x
define <32 x i16> @test_div7_32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: test_div7_32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363]
; AVX512F-NEXT: vpmulhuw %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vpsubw %ymm3, %ymm0, %ymm0
@@ -125,7 +125,7 @@ define <32 x i16> @test_div7_32i16(<32 x
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_div7_32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmulhuw {{.*}}(%rip), %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm0
@@ -138,7 +138,7 @@ define <32 x i16> @test_div7_32i16(<32 x
define <64 x i8> @test_div7_64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: test_div7_64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
@@ -177,7 +177,7 @@ define <64 x i8> @test_div7_64i8(<64 x i
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_div7_64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
@@ -206,7 +206,7 @@ define <64 x i8> @test_div7_64i8(<64 x i
define <8 x i64> @test_rem7_8i64(<8 x i64> %a) nounwind {
; AVX-LABEL: test_rem7_8i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX-NEXT: vpextrq $1, %xmm1, %rcx
; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
@@ -321,7 +321,7 @@ define <8 x i64> @test_rem7_8i64(<8 x i6
define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-LABEL: test_rem7_16i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} zmm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
; AVX-NEXT: vpmuludq %zmm1, %zmm0, %zmm2
; AVX-NEXT: vpshufd {{.*#+}} zmm1 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
@@ -342,7 +342,7 @@ define <16 x i32> @test_rem7_16i32(<16 x
define <32 x i16> @test_rem7_32i16(<32 x i16> %a) nounwind {
; AVX512F-LABEL: test_rem7_32i16:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363,9363]
; AVX512F-NEXT: vpmulhuw %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vpsubw %ymm3, %ymm0, %ymm4
@@ -362,7 +362,7 @@ define <32 x i16> @test_rem7_32i16(<32 x
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmulhuw {{.*}}(%rip), %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpsrlw $1, %zmm2, %zmm2
@@ -377,7 +377,7 @@ define <32 x i16> @test_rem7_32i16(<32 x
define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: test_rem7_64i8:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
@@ -439,7 +439,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
; AVX512BW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv.ll Mon Dec 4 09:18:51 2017
@@ -6,12 +6,12 @@
define <2 x i16> @test_urem_unary_v2i16() nounwind {
; SSE-LABEL: test_urem_unary_v2i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_urem_unary_v2i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%I8 = insertelement <2 x i16> zeroinitializer, i16 -1, i32 0
@@ -22,7 +22,7 @@ define <2 x i16> @test_urem_unary_v2i16(
define <4 x i32> @PR20355(<4 x i32> %a) nounwind {
; SSE2-LABEL: PR20355:
-; SSE2: # BB#0: # %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
@@ -45,7 +45,7 @@ define <4 x i32> @PR20355(<4 x i32> %a)
; SSE2-NEXT: retq
;
; SSE41-LABEL: PR20355:
-; SSE41: # BB#0: # %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -59,7 +59,7 @@ define <4 x i32> @PR20355(<4 x i32> %a)
; SSE41-NEXT: retq
;
; AVX1-LABEL: PR20355:
-; AVX1: # BB#0: # %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
@@ -72,7 +72,7 @@ define <4 x i32> @PR20355(<4 x i32> %a)
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR20355:
-; AVX2: # BB#0: # %entry
+; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
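
(PR20355's constant is the divide-by-3 variant of the same trick: 1431655766 (0x55555556) with M*3 == 2^32 + 2, so the high half needs no post-shift at all, only the sign fixup. A C sketch, assuming 64-bit intermediate math and arithmetic right shift on signed types; sdiv3 is an illustrative name:)

  #include <assert.h>
  #include <stdint.h>

  /* Signed 32-bit n/3 as exercised by PR20355: multiply-high by
     0x55555556 (pmuldq-style), then add the sign bit to round the
     quotient toward zero. */
  static int32_t sdiv3(int32_t n) {
      const int64_t M = 1431655766;          /* (2^32 + 2) / 3 */
      int32_t hi = (int32_t)((n * M) >> 32); /* high half of the product */
      return hi + ((uint32_t)hi >> 31);
  }

  int main(void) {
      for (int32_t n = -100000; n <= 100000; ++n)
          assert(sdiv3(n) == n / 3);
      return 0;
  }
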
Modified: llvm/trunk/test/CodeGen/X86/vector-interleave.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-interleave.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-interleave.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-interleave.ll Mon Dec 4 09:18:51 2017
@@ -9,7 +9,7 @@
; PR21281
define <64 x i16> @interleave8x8(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d, <8 x i16> %e, <8 x i16> %f, <8 x i16> %h, <8 x i16> %g) {
; SSE-LABEL: interleave8x8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm8
; SSE-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -58,7 +58,7 @@ define <64 x i16> @interleave8x8(<8 x i1
; SSE-NEXT: retq
;
; AVX1-LABEL: interleave8x8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
@@ -90,7 +90,7 @@ define <64 x i16> @interleave8x8(<8 x i1
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave8x8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
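
(interleave8x8 builds its 8-way interleave from rounds of progressively wider unpacks; each punpcklwd/punpckhwd pair in the CHECK lines above is one level of that tree. A minimal intrinsics sketch of a single level, assuming SSE2 and <emmintrin.h>; the function name is illustrative:)

  #include <emmintrin.h>

  /* One level of the unpack tree in interleave8x8: interleave the eight
     i16 lanes of a and b into (a0,b0,a1,b1,...,a7,b7) across two
     results. */
  static void interleave2x8(__m128i a, __m128i b, __m128i out[2]) {
      out[0] = _mm_unpacklo_epi16(a, b); /* punpcklwd: lanes 0..3 */
      out[1] = _mm_unpackhi_epi16(a, b); /* punpckhwd: lanes 4..7 */
  }

Chaining this through wider element sizes (words, then dwords, then qwords) produces the <64 x i16> result the test verifies.
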
Modified: llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll Mon Dec 4 09:18:51 2017
@@ -15,7 +15,7 @@
define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -55,7 +55,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv2i64:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlq $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -95,7 +95,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv2i64:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -131,7 +131,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv2i64:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -167,7 +167,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; SSE41-NEXT: retq
;
; AVX-LABEL: testv2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -197,7 +197,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv2i64:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -227,12 +227,12 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv2i64:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntq %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv2i64:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -240,7 +240,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -281,7 +281,7 @@ define <2 x i64> @testv2i64(<2 x i64> %i
define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -321,7 +321,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv2i64u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlq $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -361,7 +361,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv2i64u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -397,7 +397,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv2i64u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -433,7 +433,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; SSE41-NEXT: retq
;
; AVX-LABEL: testv2i64u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -463,7 +463,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv2i64u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -493,12 +493,12 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv2i64u:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntq %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv2i64u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -506,7 +506,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv2i64u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -547,7 +547,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %
define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -589,7 +589,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv4i32:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrld $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -631,7 +631,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv4i32:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -661,7 +661,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv4i32:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -691,7 +691,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; SSE41-NEXT: retq
;
; AVX-LABEL: testv4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -716,7 +716,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv4i32:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -741,12 +741,12 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv4i32:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntd %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i32:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -754,7 +754,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -789,7 +789,7 @@ define <4 x i32> @testv4i32(<4 x i32> %i
define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; SSE2-LABEL: testv4i32u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -831,7 +831,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv4i32u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrld $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -873,7 +873,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv4i32u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -903,7 +903,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv4i32u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -933,7 +933,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; SSE41-NEXT: retq
;
; AVX-LABEL: testv4i32u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -958,7 +958,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv4i32u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -983,12 +983,12 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv4i32u:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntd %xmm0, %xmm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i32u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -996,7 +996,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv4i32u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -1031,7 +1031,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %
define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -1067,7 +1067,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv8i16:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlw $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -1103,7 +1103,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv8i16:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -1127,7 +1127,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv8i16:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -1151,7 +1151,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; SSE41-NEXT: retq
;
; AVX-LABEL: testv8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1171,7 +1171,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv8i16:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1191,7 +1191,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv8i16:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: vpmovdw %ymm0, %xmm0
@@ -1200,7 +1200,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i16:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
@@ -1209,7 +1209,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -1237,7 +1237,7 @@ define <8 x i16> @testv8i16(<8 x i16> %i
define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; SSE2-LABEL: testv8i16u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
@@ -1273,7 +1273,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv8i16u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlw $1, %xmm1
; SSE3-NEXT: por %xmm0, %xmm1
@@ -1309,7 +1309,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv8i16u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm2, %xmm1
@@ -1333,7 +1333,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv8i16u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
@@ -1357,7 +1357,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; SSE41-NEXT: retq
;
; AVX-LABEL: testv8i16u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1377,7 +1377,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv8i16u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1397,7 +1397,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv8i16u:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: vpmovdw %ymm0, %xmm0
@@ -1406,7 +1406,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i16u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
@@ -1415,7 +1415,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; AVX512CD-NEXT: retq
;
; X32-SSE-LABEL: testv8i16u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm2, %xmm1
@@ -1443,7 +1443,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %
define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
@@ -1476,7 +1476,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv16i8:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlw $1, %xmm1
; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
@@ -1509,7 +1509,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv16i8:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pand %xmm2, %xmm3
@@ -1527,7 +1527,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv16i8:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pand %xmm2, %xmm3
@@ -1545,7 +1545,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; SSE41-NEXT: retq
;
; AVX-LABEL: testv16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1560,7 +1560,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv16i8:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1575,7 +1575,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv16i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
@@ -1584,7 +1584,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: testv16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: pand %xmm2, %xmm3
@@ -1606,7 +1606,7 @@ define <16 x i8> @testv16i8(<16 x i8> %i
define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; SSE2-LABEL: testv16i8u:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
@@ -1639,7 +1639,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; SSE2-NEXT: retq
;
; SSE3-LABEL: testv16i8u:
-; SSE3: # BB#0:
+; SSE3: # %bb.0:
; SSE3-NEXT: movdqa %xmm0, %xmm1
; SSE3-NEXT: psrlw $1, %xmm1
; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
@@ -1672,7 +1672,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; SSE3-NEXT: retq
;
; SSSE3-LABEL: testv16i8u:
-; SSSE3: # BB#0:
+; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pand %xmm2, %xmm3
@@ -1690,7 +1690,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testv16i8u:
-; SSE41: # BB#0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pand %xmm2, %xmm3
@@ -1708,7 +1708,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; SSE41-NEXT: retq
;
; AVX-LABEL: testv16i8u:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1723,7 +1723,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; AVX-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv16i8u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1738,7 +1738,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv16i8u:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
@@ -1747,7 +1747,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: testv16i8u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: pand %xmm2, %xmm3
@@ -1769,25 +1769,25 @@ define <16 x i8> @testv16i8u(<16 x i8> %
define <2 x i64> @foldv2i64() nounwind {
; SSE-LABEL: foldv2i64:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $55, %eax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv2i64:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: movl $55, %eax
; NOBW-NEXT: vmovq %rax, %xmm0
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv2i64:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: movl $55, %eax
; AVX512VLBWDQ-NEXT: vmovq %rax, %xmm0
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv2i64:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl $55, %eax
; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
@@ -1797,25 +1797,25 @@ define <2 x i64> @foldv2i64() nounwind {
define <2 x i64> @foldv2i64u() nounwind {
; SSE-LABEL: foldv2i64u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movl $55, %eax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv2i64u:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: movl $55, %eax
; NOBW-NEXT: vmovq %rax, %xmm0
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv2i64u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: movl $55, %eax
; AVX512VLBWDQ-NEXT: vmovq %rax, %xmm0
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv2i64u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movl $55, %eax
; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
@@ -1825,22 +1825,22 @@ define <2 x i64> @foldv2i64u() nounwind
define <4 x i32> @foldv4i32() nounwind {
; SSE-LABEL: foldv4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv4i32:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv4i32:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv4i32:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
; X32-SSE-NEXT: retl
%out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 0)
@@ -1849,22 +1849,22 @@ define <4 x i32> @foldv4i32() nounwind {
define <4 x i32> @foldv4i32u() nounwind {
; SSE-LABEL: foldv4i32u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv4i32u:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv4i32u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv4i32u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24]
; X32-SSE-NEXT: retl
%out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 -1)
@@ -1873,22 +1873,22 @@ define <4 x i32> @foldv4i32u() nounwind
define <8 x i16> @foldv8i16() nounwind {
; SSE-LABEL: foldv8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv8i16:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv8i16:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv8i16:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; X32-SSE-NEXT: retl
%out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 0)
@@ -1897,22 +1897,22 @@ define <8 x i16> @foldv8i16() nounwind {
define <8 x i16> @foldv8i16u() nounwind {
; SSE-LABEL: foldv8i16u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv8i16u:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv8i16u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv8i16u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; X32-SSE-NEXT: retl
%out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 -1)
@@ -1921,22 +1921,22 @@ define <8 x i16> @foldv8i16u() nounwind
define <16 x i8> @foldv16i8() nounwind {
; SSE-LABEL: foldv16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv16i8:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv16i8:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv16i8:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; X32-SSE-NEXT: retl
%out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 0)
@@ -1945,22 +1945,22 @@ define <16 x i8> @foldv16i8() nounwind {
define <16 x i8> @foldv16i8u() nounwind {
; SSE-LABEL: foldv16i8u:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; SSE-NEXT: retq
;
; NOBW-LABEL: foldv16i8u:
-; NOBW: # BB#0:
+; NOBW: # %bb.0:
; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; NOBW-NEXT: retq
;
; AVX512VLBWDQ-LABEL: foldv16i8u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; AVX512VLBWDQ-NEXT: retq
;
; X32-SSE-LABEL: foldv16i8u:
-; X32-SSE: # BB#0:
+; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; X32-SSE-NEXT: retl
%out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 -1)
Modified: llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll Mon Dec 4 09:18:51 2017
@@ -11,7 +11,7 @@
define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm1
@@ -66,7 +66,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -96,7 +96,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv4i64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -126,7 +126,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv4i64:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -156,19 +156,19 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv4i64:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntq %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i64:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -203,7 +203,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm1
@@ -258,7 +258,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv4i64u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -288,7 +288,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv4i64u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -318,7 +318,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv4i64u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -348,19 +348,19 @@ define <4 x i64> @testv4i64u(<4 x i64> %
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv4i64u:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntq %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv4i64u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -395,7 +395,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -440,7 +440,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -465,7 +465,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv8i32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -490,7 +490,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv8i32:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -515,19 +515,19 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv8i32:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i32:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -557,7 +557,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX1-LABEL: testv8i32u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -602,7 +602,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv8i32u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -627,7 +627,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv8i32u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -652,7 +652,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv8i32u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -677,19 +677,19 @@ define <8 x i32> @testv8i32u(<8 x i32> %
; AVX512VLBWDQ-NEXT: retq
;
; AVX512VLCD-LABEL: testv8i32u:
-; AVX512VLCD: # BB#0:
+; AVX512VLCD: # %bb.0:
; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
; AVX512VLCD-NEXT: retq
;
; AVX512CD-LABEL: testv8i32u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -719,7 +719,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -754,7 +754,7 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -774,7 +774,7 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv16i16:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -794,7 +794,7 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv16i16:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -814,7 +814,7 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv16i16:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
@@ -822,7 +822,7 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX512-NEXT: retq
;
; X32-AVX-LABEL: testv16i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -846,7 +846,7 @@ define <16 x i16> @testv16i16(<16 x i16>
define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX1-LABEL: testv16i16u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -881,7 +881,7 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv16i16u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -901,7 +901,7 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv16i16u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -921,7 +921,7 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv16i16u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -941,7 +941,7 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv16i16u:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-NEXT: vplzcntd %zmm0, %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
@@ -949,7 +949,7 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX512-NEXT: retq
;
; X32-AVX-LABEL: testv16i16u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -973,7 +973,7 @@ define <16 x i16> @testv16i16u(<16 x i16
define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -998,7 +998,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1013,7 +1013,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv32i8:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1028,7 +1028,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv32i8:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1043,7 +1043,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv32i8:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm1, %zmm1
@@ -1058,7 +1058,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX512-NEXT: retq
;
; X32-AVX-LABEL: testv32i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1077,7 +1077,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX1-LABEL: testv32i8u:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
@@ -1102,7 +1102,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX1-NEXT: retq
;
; AVX2-LABEL: testv32i8u:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1117,7 +1117,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: testv32i8u:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1132,7 +1132,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX512VL-NEXT: retq
;
; AVX512VLBWDQ-LABEL: testv32i8u:
-; AVX512VLBWDQ: # BB#0:
+; AVX512VLBWDQ: # %bb.0:
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VLBWDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1147,7 +1147,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX512VLBWDQ-NEXT: retq
;
; AVX512-LABEL: testv32i8u:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512-NEXT: vplzcntd %zmm1, %zmm1
@@ -1162,7 +1162,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX512-NEXT: retq
;
; X32-AVX-LABEL: testv32i8u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -1181,12 +1181,12 @@ define <32 x i8> @testv32i8u(<32 x i8> %
define <4 x i64> @foldv4i64() nounwind {
; X64-LABEL: foldv4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv4i64:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
@@ -1195,12 +1195,12 @@ define <4 x i64> @foldv4i64() nounwind {
define <4 x i64> @foldv4i64u() nounwind {
; X64-LABEL: foldv4i64u:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv4i64u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
@@ -1209,12 +1209,12 @@ define <4 x i64> @foldv4i64u() nounwind
define <8 x i32> @foldv8i32() nounwind {
; X64-LABEL: foldv8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv8i32:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; X32-AVX-NEXT: retl
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
@@ -1223,12 +1223,12 @@ define <8 x i32> @foldv8i32() nounwind {
define <8 x i32> @foldv8i32u() nounwind {
; X64-LABEL: foldv8i32u:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv8i32u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; X32-AVX-NEXT: retl
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
@@ -1237,12 +1237,12 @@ define <8 x i32> @foldv8i32u() nounwind
define <16 x i16> @foldv16i16() nounwind {
; X64-LABEL: foldv16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv16i16:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; X32-AVX-NEXT: retl
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
@@ -1251,12 +1251,12 @@ define <16 x i16> @foldv16i16() nounwind
define <16 x i16> @foldv16i16u() nounwind {
; X64-LABEL: foldv16i16u:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv16i16u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; X32-AVX-NEXT: retl
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
@@ -1265,12 +1265,12 @@ define <16 x i16> @foldv16i16u() nounwin
define <32 x i8> @foldv32i8() nounwind {
; X64-LABEL: foldv32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv32i8:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; X32-AVX-NEXT: retl
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
@@ -1279,12 +1279,12 @@ define <32 x i8> @foldv32i8() nounwind {
define <32 x i8> @foldv32i8u() nounwind {
; X64-LABEL: foldv32i8u:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; X64-NEXT: retq
;
; X32-AVX-LABEL: foldv32i8u:
-; X32-AVX: # BB#0:
+; X32-AVX: # %bb.0:
; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; X32-AVX-NEXT: retl
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
Modified: llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll Mon Dec 4 09:18:51 2017
@@ -6,17 +6,17 @@
define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv8i64:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv8i64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $1, %zmm0, %zmm1
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlq $2, %zmm0, %zmm1
@@ -44,7 +44,7 @@ define <8 x i64> @testv8i64(<8 x i64> %i
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv8i64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlq $1, %zmm0, %zmm1
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpsrlq $2, %zmm0, %zmm1
@@ -85,17 +85,17 @@ define <8 x i64> @testv8i64(<8 x i64> %i
define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv8i64u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv8i64u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $1, %zmm0, %zmm1
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlq $2, %zmm0, %zmm1
@@ -123,7 +123,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv8i64u:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrlq $1, %zmm0, %zmm1
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpsrlq $2, %zmm0, %zmm1
@@ -164,17 +164,17 @@ define <8 x i64> @testv8i64u(<8 x i64> %
define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv16i32:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv16i32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrld $1, %zmm0, %zmm1
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrld $2, %zmm0, %zmm1
@@ -204,7 +204,7 @@ define <16 x i32> @testv16i32(<16 x i32>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv16i32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrld $1, %zmm0, %zmm1
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpsrld $2, %zmm0, %zmm1
@@ -251,17 +251,17 @@ define <16 x i32> @testv16i32(<16 x i32>
define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv16i32u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv16i32u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrld $1, %zmm0, %zmm1
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrld $2, %zmm0, %zmm1
@@ -291,7 +291,7 @@ define <16 x i32> @testv16i32u(<16 x i32
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv16i32u:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpsrld $1, %zmm0, %zmm1
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpsrld $2, %zmm0, %zmm1
@@ -338,7 +338,7 @@ define <16 x i32> @testv16i32u(<16 x i32
define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CD-LABEL: testv32i16:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
@@ -351,7 +351,7 @@ define <32 x i16> @testv32i16(<32 x i16>
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv32i16:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1
@@ -366,7 +366,7 @@ define <32 x i16> @testv32i16(<32 x i16>
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv32i16:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -388,7 +388,7 @@ define <32 x i16> @testv32i16(<32 x i16>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv32i16:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -425,7 +425,7 @@ define <32 x i16> @testv32i16(<32 x i16>
define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CD-LABEL: testv32i16u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
@@ -438,7 +438,7 @@ define <32 x i16> @testv32i16u(<32 x i16
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv32i16u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1
@@ -453,7 +453,7 @@ define <32 x i16> @testv32i16u(<32 x i16
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv32i16u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -475,7 +475,7 @@ define <32 x i16> @testv32i16u(<32 x i16
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv32i16u:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -512,7 +512,7 @@ define <32 x i16> @testv32i16u(<32 x i16
define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CD-LABEL: testv64i8:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512CD-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; AVX512CD-NEXT: vplzcntd %zmm2, %zmm2
@@ -537,7 +537,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv64i8:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512CDBW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
@@ -564,7 +564,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv64i8:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -579,7 +579,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv64i8:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -606,7 +606,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CD-LABEL: testv64i8u:
-; AVX512CD: # BB#0:
+; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512CD-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; AVX512CD-NEXT: vplzcntd %zmm2, %zmm2
@@ -631,7 +631,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
; AVX512CD-NEXT: retq
;
; AVX512CDBW-LABEL: testv64i8u:
-; AVX512CDBW: # BB#0:
+; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512CDBW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
@@ -658,7 +658,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
; AVX512CDBW-NEXT: retq
;
; AVX512BW-LABEL: testv64i8u:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
@@ -673,7 +673,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: testv64i8u:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
Modified: llvm/trunk/test/CodeGen/X86/vector-merge-store-fp-constants.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-merge-store-fp-constants.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-merge-store-fp-constants.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-merge-store-fp-constants.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define void @merge_8_float_zero_stores(float* %ptr) {
; DEFAULTCPU-LABEL: merge_8_float_zero_stores:
-; DEFAULTCPU: # BB#0:
+; DEFAULTCPU: # %bb.0:
; DEFAULTCPU-NEXT: movq $0, (%rdi)
; DEFAULTCPU-NEXT: movq $0, 8(%rdi)
; DEFAULTCPU-NEXT: movq $0, 16(%rdi)
@@ -12,7 +12,7 @@ define void @merge_8_float_zero_stores(f
; DEFAULTCPU-NEXT: retq
;
; X64CPU-LABEL: merge_8_float_zero_stores:
-; X64CPU: # BB#0:
+; X64CPU: # %bb.0:
; X64CPU-NEXT: xorps %xmm0, %xmm0
; X64CPU-NEXT: movups %xmm0, (%rdi)
; X64CPU-NEXT: movups %xmm0, 16(%rdi)
Modified: llvm/trunk/test/CodeGen/X86/vector-mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-mul.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-mul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-mul.ll Mon Dec 4 09:18:51 2017
@@ -10,17 +10,17 @@
define <2 x i64> @mul_v2i64_8(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: psllq $3, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllq $3, %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsllq $3, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 8, i64 8>
@@ -29,17 +29,17 @@ define <2 x i64> @mul_v2i64_8(<2 x i64>
define <4 x i32> @mul_v4i32_8(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pslld $3, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pslld $3, %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v4i32_8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpslld $3, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 8, i32 8, i32 8, i32 8>
@@ -48,17 +48,17 @@ define <4 x i32> @mul_v4i32_8(<4 x i32>
define <8 x i16> @mul_v8i16_8(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: psllw $3, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllw $3, %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v8i16_8:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -67,24 +67,24 @@ define <8 x i16> @mul_v8i16_8(<8 x i16>
define <16 x i8> @mul_v16i8_32(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: psllw $5, %xmm0
; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: psllw $5, %xmm0
; X64-NEXT: pand {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_32:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_32:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsllw $5, %xmm0, %xmm0
; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: retq
@@ -98,7 +98,7 @@ define <16 x i8> @mul_v16i8_32(<16 x i8>
define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_32_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: psllq $3, %xmm1
; X86-NEXT: psllq $5, %xmm0
@@ -106,7 +106,7 @@ define <2 x i64> @mul_v2i64_32_8(<2 x i6
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_32_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psllq $3, %xmm1
; X64-NEXT: psllq $5, %xmm0
@@ -114,12 +114,12 @@ define <2 x i64> @mul_v2i64_32_8(<2 x i6
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v2i64_32_8:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v2i64_32_8:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 32, i64 8>
@@ -128,22 +128,22 @@ define <2 x i64> @mul_v2i64_32_8(<2 x i6
define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_1_2_4_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_1_2_4_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v4i32_1_2_4_8:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v4i32_1_2_4_8:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 4, i32 8>
@@ -152,22 +152,22 @@ define <4 x i32> @mul_v4i32_1_2_4_8(<4 x
define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128>
@@ -176,7 +176,7 @@ define <8 x i16> @mul_v8i16_1_2_4_8_16_3
define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm2
; X86-NEXT: psllw $4, %xmm2
@@ -196,7 +196,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm2
; X64-NEXT: psllw $4, %xmm2
@@ -216,12 +216,12 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsllw $4, %xmm0, %xmm1
; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,8192,24640,8192,24640,8192,24640]
@@ -244,7 +244,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_
define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [17,0,17,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -255,7 +255,7 @@ define <2 x i64> @mul_v2i64_17(<2 x i64>
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_17:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [17,17]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -266,7 +266,7 @@ define <2 x i64> @mul_v2i64_17(<2 x i64>
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_17:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [17,17]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -280,22 +280,22 @@ define <2 x i64> @mul_v2i64_17(<2 x i64>
define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_17:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v4i32_17:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v4i32_17:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [17,17,17,17]
; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
@@ -305,17 +305,17 @@ define <4 x i32> @mul_v4i32_17(<4 x i32>
define <8 x i16> @mul_v8i16_17(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_17:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v8i16_17:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17>
@@ -324,7 +324,7 @@ define <8 x i16> @mul_v8i16_17(<8 x i16>
define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_17:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmovsxbw %xmm0, %xmm1
; X86-NEXT: movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
; X86-NEXT: pmullw %xmm2, %xmm1
@@ -339,7 +339,7 @@ define <16 x i8> @mul_v16i8_17(<16 x i8>
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_17:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbw %xmm0, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
; X64-NEXT: pmullw %xmm2, %xmm1
@@ -354,7 +354,7 @@ define <16 x i8> @mul_v16i8_17(<16 x i8>
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_17:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -365,7 +365,7 @@ define <16 x i8> @mul_v16i8_17(<16 x i8>
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_17:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -385,7 +385,7 @@ define <16 x i8> @mul_v16i8_17(<16 x i8>
define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_17_65:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [17,0,65,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -396,7 +396,7 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_17_65:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [17,65]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -407,7 +407,7 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_17_65:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [17,65]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -421,17 +421,17 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i
define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_5_17_33_65:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_5_17_33_65:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v4i32_5_17_33_65:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 5, i32 17, i32 33, i32 65>
@@ -440,17 +440,17 @@ define <4 x i32> @mul_v4i32_5_17_33_65(<
define <8 x i16> @mul_v8i16_2_3_9_17_33_65_129_257(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 2, i16 3, i16 9, i16 17, i16 33, i16 65, i16 129, i16 257>
@@ -459,7 +459,7 @@ define <8 x i16> @mul_v8i16_2_3_9_17_33_
define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmovsxbw %xmm0, %xmm1
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm1
; X86-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -473,7 +473,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbw %xmm0, %xmm1
; X64-NEXT: pmullw {{.*}}(%rip), %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
@@ -487,7 +487,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
; X64-XOP-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
; X64-XOP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -497,7 +497,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -517,7 +517,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_
define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [7,0,7,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -528,7 +528,7 @@ define <2 x i64> @mul_v2i64_7(<2 x i64>
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [7,7]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -539,7 +539,7 @@ define <2 x i64> @mul_v2i64_7(<2 x i64>
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_7:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [7,7]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -553,22 +553,22 @@ define <2 x i64> @mul_v2i64_7(<2 x i64>
define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v4i32_7:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v4i32_7:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [7,7,7,7]
; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
@@ -578,17 +578,17 @@ define <4 x i32> @mul_v4i32_7(<4 x i32>
define <8 x i16> @mul_v8i16_7(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v8i16_7:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
@@ -597,7 +597,7 @@ define <8 x i16> @mul_v8i16_7(<8 x i16>
define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_31:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmovsxbw %xmm0, %xmm1
; X86-NEXT: movdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
; X86-NEXT: pmullw %xmm2, %xmm1
@@ -612,7 +612,7 @@ define <16 x i8> @mul_v16i8_31(<16 x i8>
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_31:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbw %xmm0, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
; X64-NEXT: pmullw %xmm2, %xmm1
@@ -627,7 +627,7 @@ define <16 x i8> @mul_v16i8_31(<16 x i8>
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_31:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -638,7 +638,7 @@ define <16 x i8> @mul_v16i8_31(<16 x i8>
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_31:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -658,7 +658,7 @@ define <16 x i8> @mul_v16i8_31(<16 x i8>
define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_15_63:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [15,0,63,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -669,7 +669,7 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_15_63:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [15,63]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -680,7 +680,7 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_15_63:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,63]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -694,7 +694,7 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i
define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_neg_15_63:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967281,4294967295,4294967233,4294967295]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -708,7 +708,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_neg_15_63:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -722,7 +722,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_neg_15_63:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551601,18446744073709551553]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -738,7 +738,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2
define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_neg_17_65:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967279,4294967295,4294967231,4294967295]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -752,7 +752,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_neg_17_65:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -766,7 +766,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_neg_17_65:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551599,18446744073709551551]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -782,7 +782,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2
define <2 x i64> @mul_v2i64_0_1(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_0_1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [0,0,1,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -793,7 +793,7 @@ define <2 x i64> @mul_v2i64_0_1(<2 x i64
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_0_1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $1, %eax
; X64-NEXT: movq %rax, %xmm1
; X64-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
@@ -806,7 +806,7 @@ define <2 x i64> @mul_v2i64_0_1(<2 x i64
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_0_1:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movl $1, %eax
; X64-AVX-NEXT: vmovq %rax, %xmm1
; X64-AVX-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
@@ -822,7 +822,7 @@ define <2 x i64> @mul_v2i64_0_1(<2 x i64
define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_neg_0_1:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [0,0,4294967295,4294967295]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -836,7 +836,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_neg_0_1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrlq $32, %xmm1
; X64-NEXT: movq $-1, %rax
@@ -854,7 +854,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_neg_0_1:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm1
; X64-AVX-NEXT: movq $-1, %rax
; X64-AVX-NEXT: vmovq %rax, %xmm2
@@ -875,7 +875,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x
define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_15_neg_63:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [15,0,4294967233,4294967295]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -889,7 +889,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_15_neg_63:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa %xmm0, %xmm1
; X64-NEXT: psrlq $32, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [15,18446744073709551553]
@@ -905,7 +905,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_15_neg_63:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm1
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [15,18446744073709551553]
; X64-AVX-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
@@ -924,17 +924,17 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2
define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
; X86-LABEL: mul_v4i32_0_15_31_7:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v4i32_0_15_31_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v4i32_0_15_31_7:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 0, i32 15, i32 31, i32 7>
@@ -943,17 +943,17 @@ define <4 x i32> @mul_v4i32_0_15_31_7(<4
define <8 x i16> @mul_v8i16_0_1_7_15_31_63_127_255(<8 x i16> %a0) nounwind {
; X86-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <8 x i16> %a0, <i16 0, i16 1, i16 7, i16 15, i16 31, i16 63, i16 127, i16 255>
@@ -962,7 +962,7 @@ define <8 x i16> @mul_v8i16_0_1_7_15_31_
define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: pmovsxbw %xmm0, %xmm1
; X86-NEXT: movdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
; X86-NEXT: pmullw %xmm2, %xmm1
@@ -977,7 +977,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_3
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: pmovsxbw %xmm0, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
; X64-NEXT: pmullw %xmm2, %xmm1
@@ -992,7 +992,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_3
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
-; X64-XOP: # BB#0:
+; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
@@ -1003,7 +1003,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_3
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
-; X64-AVX2: # BB#0:
+; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -1019,7 +1019,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_3
define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind {
; X86-LABEL: mul_v2i64_68_132:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [68,0,132,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -1030,7 +1030,7 @@ define <2 x i64> @mul_v2i64_68_132(<2 x
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_68_132:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [68,132]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -1041,7 +1041,7 @@ define <2 x i64> @mul_v2i64_68_132(<2 x
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_68_132:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [68,132]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
@@ -1055,7 +1055,7 @@ define <2 x i64> @mul_v2i64_68_132(<2 x
define <2 x i64> @mul_v2i64_60_120(<2 x i64> %x) nounwind {
; X86-LABEL: mul_v2i64_60_120:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [60,0,124,0]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pmuludq %xmm1, %xmm2
@@ -1066,7 +1066,7 @@ define <2 x i64> @mul_v2i64_60_120(<2 x
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_60_120:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movdqa {{.*#+}} xmm1 = [60,124]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pmuludq %xmm1, %xmm2
@@ -1077,7 +1077,7 @@ define <2 x i64> @mul_v2i64_60_120(<2 x
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_60_120:
-; X64-AVX: # BB#0:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [60,124]
; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vector-narrow-binop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-narrow-binop.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-narrow-binop.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-narrow-binop.ll Mon Dec 4 09:18:51 2017
@@ -11,7 +11,7 @@
define <8 x i32> @PR32790(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE-LABEL: PR32790:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm2, %xmm0
; SSE-NEXT: paddd %xmm3, %xmm1
; SSE-NEXT: pand %xmm5, %xmm1
@@ -21,7 +21,7 @@ define <8 x i32> @PR32790(<8 x i32> %a,
; SSE-NEXT: retq
;
; AVX1-LABEL: PR32790:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
@@ -36,14 +36,14 @@ define <8 x i32> @PR32790(<8 x i32> %a,
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR32790:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubd %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR32790:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512-NEXT: vpsubd %ymm3, %ymm0, %ymm0
@@ -59,14 +59,14 @@ define <8 x i32> @PR32790(<8 x i32> %a,
define <4 x i32> @do_not_use_256bit_op(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; SSE-LABEL: do_not_use_256bit_op:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: do_not_use_256bit_op:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vector-pcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-pcmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-pcmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-pcmp.ll Mon Dec 4 09:18:51 2017
@@ -9,13 +9,13 @@
define <16 x i8> @test_pcmpgtb(<16 x i8> %x) {
; SSE-LABEL: test_pcmpgtb:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pcmpgtb %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_pcmpgtb:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -26,13 +26,13 @@ define <16 x i8> @test_pcmpgtb(<16 x i8>
define <8 x i16> @test_pcmpgtw(<8 x i16> %x) {
; SSE-LABEL: test_pcmpgtw:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_pcmpgtw:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -43,13 +43,13 @@ define <8 x i16> @test_pcmpgtw(<8 x i16>
define <4 x i32> @test_pcmpgtd(<4 x i32> %x) {
; SSE-LABEL: test_pcmpgtd:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_pcmpgtd:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -60,7 +60,7 @@ define <4 x i32> @test_pcmpgtd(<4 x i32>
define <2 x i64> @test_pcmpgtq(<2 x i64> %x) {
; SSE2-LABEL: test_pcmpgtq:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
@@ -68,13 +68,13 @@ define <2 x i64> @test_pcmpgtq(<2 x i64>
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_pcmpgtq:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqd %xmm1, %xmm1
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: test_pcmpgtq:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -85,7 +85,7 @@ define <2 x i64> @test_pcmpgtq(<2 x i64>
define <1 x i128> @test_strange_type(<1 x i128> %x) {
; SSE2-LABEL: test_strange_type:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: sarq $63, %rsi
; SSE2-NEXT: movq %rsi, %xmm0
; SSE2-NEXT: notq %rsi
@@ -97,7 +97,7 @@ define <1 x i128> @test_strange_type(<1
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_strange_type:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: sarq $63, %rsi
; SSE42-NEXT: movq %rsi, %xmm0
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -108,7 +108,7 @@ define <1 x i128> @test_strange_type(<1
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_strange_type:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: sarq $63, %rsi
; AVX1-NEXT: vmovq %rsi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -119,7 +119,7 @@ define <1 x i128> @test_strange_type(<1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_strange_type:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: sarq $63, %rsi
; AVX2-NEXT: vmovq %rsi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
@@ -135,14 +135,14 @@ define <1 x i128> @test_strange_type(<1
define <32 x i8> @test_pcmpgtb_256(<32 x i8> %x) {
; SSE-LABEL: test_pcmpgtb_256:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: pcmpgtb %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_pcmpgtb_256:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
@@ -154,7 +154,7 @@ define <32 x i8> @test_pcmpgtb_256(<32 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_pcmpgtb_256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -165,14 +165,14 @@ define <32 x i8> @test_pcmpgtb_256(<32 x
define <16 x i16> @test_pcmpgtw_256(<16 x i16> %x) {
; SSE-LABEL: test_pcmpgtw_256:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: pcmpgtw %xmm2, %xmm0
; SSE-NEXT: pcmpgtw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_pcmpgtw_256:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
@@ -183,7 +183,7 @@ define <16 x i16> @test_pcmpgtw_256(<16
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_pcmpgtw_256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -194,14 +194,14 @@ define <16 x i16> @test_pcmpgtw_256(<16
define <8 x i32> @test_pcmpgtd_256(<8 x i32> %x) {
; SSE-LABEL: test_pcmpgtd_256:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: pcmpgtd %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_pcmpgtd_256:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
@@ -212,7 +212,7 @@ define <8 x i32> @test_pcmpgtd_256(<8 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_pcmpgtd_256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -223,7 +223,7 @@ define <8 x i32> @test_pcmpgtd_256(<8 x
define <4 x i64> @test_pcmpgtq_256(<4 x i64> %x) {
; SSE2-LABEL: test_pcmpgtq_256:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm0
@@ -234,14 +234,14 @@ define <4 x i64> @test_pcmpgtq_256(<4 x
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_pcmpgtq_256:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqd %xmm2, %xmm2
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
; SSE42-NEXT: pcmpgtq %xmm2, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_pcmpgtq_256:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1
@@ -253,7 +253,7 @@ define <4 x i64> @test_pcmpgtq_256(<4 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_pcmpgtq_256:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -264,13 +264,13 @@ define <4 x i64> @test_pcmpgtq_256(<4 x
define <16 x i8> @cmpeq_zext_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: cmpeq_zext_v16i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmpeq_zext_v16i8:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
@@ -281,7 +281,7 @@ define <16 x i8> @cmpeq_zext_v16i8(<16 x
define <16 x i16> @cmpeq_zext_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: cmpeq_zext_v16i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqw %xmm2, %xmm0
; SSE-NEXT: psrlw $15, %xmm0
; SSE-NEXT: pcmpeqw %xmm3, %xmm1
@@ -289,7 +289,7 @@ define <16 x i16> @cmpeq_zext_v16i16(<16
; SSE-NEXT: retq
;
; AVX1-LABEL: cmpeq_zext_v16i16:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpeqw %xmm2, %xmm3, %xmm2
@@ -299,7 +299,7 @@ define <16 x i16> @cmpeq_zext_v16i16(<16
; AVX1-NEXT: retq
;
; AVX2-LABEL: cmpeq_zext_v16i16:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -310,13 +310,13 @@ define <16 x i16> @cmpeq_zext_v16i16(<16
define <4 x i32> @cmpeq_zext_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: cmpeq_zext_v4i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: psrld $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmpeq_zext_v4i32:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -327,7 +327,7 @@ define <4 x i32> @cmpeq_zext_v4i32(<4 x
define <4 x i64> @cmpeq_zext_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: cmpeq_zext_v4i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,0,3,2]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
@@ -340,7 +340,7 @@ define <4 x i64> @cmpeq_zext_v4i64(<4 x
; SSE2-NEXT: retq
;
; SSE42-LABEL: cmpeq_zext_v4i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqq %xmm2, %xmm0
; SSE42-NEXT: psrlq $63, %xmm0
; SSE42-NEXT: pcmpeqq %xmm3, %xmm1
@@ -348,7 +348,7 @@ define <4 x i64> @cmpeq_zext_v4i64(<4 x
; SSE42-NEXT: retq
;
; AVX1-LABEL: cmpeq_zext_v4i64:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm3, %xmm2
@@ -358,7 +358,7 @@ define <4 x i64> @cmpeq_zext_v4i64(<4 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: cmpeq_zext_v4i64:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlq $63, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -369,7 +369,7 @@ define <4 x i64> @cmpeq_zext_v4i64(<4 x
define <32 x i8> @cmpgt_zext_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: cmpgt_zext_v32i8:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE-NEXT: pand %xmm2, %xmm0
@@ -378,7 +378,7 @@ define <32 x i8> @cmpgt_zext_v32i8(<32 x
; SSE-NEXT: retq
;
; AVX1-LABEL: cmpgt_zext_v32i8:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
@@ -388,7 +388,7 @@ define <32 x i8> @cmpgt_zext_v32i8(<32 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: cmpgt_zext_v32i8:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -399,13 +399,13 @@ define <32 x i8> @cmpgt_zext_v32i8(<32 x
define <8 x i16> @cmpgt_zext_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: cmpgt_zext_v8i16:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtw %xmm1, %xmm0
; SSE-NEXT: psrlw $15, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmpgt_zext_v8i16:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -416,7 +416,7 @@ define <8 x i16> @cmpgt_zext_v8i16(<8 x
define <8 x i32> @cmpgt_zext_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: cmpgt_zext_v8i32:
-; SSE: # BB#0:
+; SSE: # %bb.0:
; SSE-NEXT: pcmpgtd %xmm2, %xmm0
; SSE-NEXT: psrld $31, %xmm0
; SSE-NEXT: pcmpgtd %xmm3, %xmm1
@@ -424,7 +424,7 @@ define <8 x i32> @cmpgt_zext_v8i32(<8 x
; SSE-NEXT: retq
;
; AVX1-LABEL: cmpgt_zext_v8i32:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
@@ -434,7 +434,7 @@ define <8 x i32> @cmpgt_zext_v8i32(<8 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: cmpgt_zext_v8i32:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $31, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -445,7 +445,7 @@ define <8 x i32> @cmpgt_zext_v8i32(<8 x
define <2 x i64> @cmpgt_zext_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: cmpgt_zext_v2i64:
-; SSE2: # BB#0:
+; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm0
@@ -461,13 +461,13 @@ define <2 x i64> @cmpgt_zext_v2i64(<2 x
; SSE2-NEXT: retq
;
; SSE42-LABEL: cmpgt_zext_v2i64:
-; SSE42: # BB#0:
+; SSE42: # %bb.0:
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
; SSE42-NEXT: psrlq $63, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: cmpgt_zext_v2i64:
-; AVX: # BB#0:
+; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $63, %xmm0, %xmm0
; AVX-NEXT: retq