[llvm] r319665 - [CodeGen] Unify MBB reference format in both MIR and debug output
Francis Visoiu Mistrih via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 4 09:18:56 PST 2017
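
This commit changes how machine basic blocks are printed, both in MIR and in
the assembly debug comments: the old "BB#N" spelling becomes "%bb.N". The test
updates below are therefore mechanical, rewriting every FileCheck pattern that
matched the old form. As a minimal hypothetical sketch (the function and its
instructions are illustrative only, not part of this commit), an affected test
changes like this:

    ; Old check line:   ; X64: # BB#0:
    ; New check line:   ; X64: # %bb.0:

    define i32 @sample(i32 %a, i32 %b) nounwind {
    ; X64-LABEL: sample:
    ; X64:       # %bb.0:
    ; X64-NEXT:    leal (%rdi,%rsi), %eax
    ; X64-NEXT:    retq
      %sum = add i32 %a, %b
      ret i32 %sum
    }

Note that Darwin targets print assembly comments with "##" rather than "#", so
the avx1-logical-load-folding.ll hunks below match "## %bb.0:" instead.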
Modified: llvm/trunk/test/CodeGen/X86/avx1-logical-load-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx1-logical-load-folding.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx1-logical-load-folding.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx1-logical-load-folding.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@
; Function Attrs: nounwind ssp uwtable
define void @test1(float* %A, float* %C) #0 {
; X86-LABEL: test1:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovaps (%ecx), %ymm0
@@ -15,7 +15,7 @@ define void @test1(float* %A, float* %C)
; X86-NEXT: retl
;
; X64-LABEL: test1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovss %xmm0, (%rsi)
@@ -34,7 +34,7 @@ define void @test1(float* %A, float* %C)
; Function Attrs: nounwind ssp uwtable
define void @test2(float* %A, float* %C) #0 {
; X86-LABEL: test2:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovaps (%ecx), %ymm0
@@ -44,7 +44,7 @@ define void @test2(float* %A, float* %C)
; X86-NEXT: retl
;
; X64-LABEL: test2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: vorps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovss %xmm0, (%rsi)
@@ -63,7 +63,7 @@ define void @test2(float* %A, float* %C)
; Function Attrs: nounwind ssp uwtable
define void @test3(float* %A, float* %C) #0 {
; X86-LABEL: test3:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovaps (%ecx), %ymm0
@@ -73,7 +73,7 @@ define void @test3(float* %A, float* %C)
; X86-NEXT: retl
;
; X64-LABEL: test3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovss %xmm0, (%rsi)
@@ -91,7 +91,7 @@ define void @test3(float* %A, float* %C)
define void @test4(float* %A, float* %C) #0 {
; X86-LABEL: test4:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: vmovaps (%ecx), %ymm0
@@ -101,7 +101,7 @@ define void @test4(float* %A, float* %C)
; X86-NEXT: retl
;
; X64-LABEL: test4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovaps (%rdi), %ymm0
; X64-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovss %xmm0, (%rsi)
Modified: llvm/trunk/test/CodeGen/X86/avx2-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-arith.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-arith.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <4 x i64> @test_vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: test_vpaddq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <4 x i64> %i, %j
@@ -18,12 +18,12 @@ define <4 x i64> @test_vpaddq(<4 x i64>
define <8 x i32> @test_vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpaddd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <8 x i32> %i, %j
@@ -32,12 +32,12 @@ define <8 x i32> @test_vpaddd(<8 x i32>
define <16 x i16> @test_vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpaddw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <16 x i16> %i, %j
@@ -46,12 +46,12 @@ define <16 x i16> @test_vpaddw(<16 x i16
define <32 x i8> @test_vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: test_vpaddb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <32 x i8> %i, %j
@@ -60,12 +60,12 @@ define <32 x i8> @test_vpaddb(<32 x i8>
define <4 x i64> @test_vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: test_vpsubq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <4 x i64> %i, %j
@@ -74,12 +74,12 @@ define <4 x i64> @test_vpsubq(<4 x i64>
define <8 x i32> @test_vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpsubd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <8 x i32> %i, %j
@@ -88,12 +88,12 @@ define <8 x i32> @test_vpsubd(<8 x i32>
define <16 x i16> @test_vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpsubw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <16 x i16> %i, %j
@@ -102,12 +102,12 @@ define <16 x i16> @test_vpsubw(<16 x i16
define <32 x i8> @test_vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: test_vpsubb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <32 x i8> %i, %j
@@ -116,12 +116,12 @@ define <32 x i8> @test_vpsubb(<32 x i8>
define <8 x i32> @test_vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpmulld:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpmulld:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = mul <8 x i32> %i, %j
@@ -130,12 +130,12 @@ define <8 x i32> @test_vpmulld(<8 x i32>
define <16 x i16> @test_vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpmullw:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpmullw:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = mul <16 x i16> %i, %j
@@ -144,7 +144,7 @@ define <16 x i16> @test_vpmullw(<16 x i1
define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; X32-LABEL: mul_v16i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovsxbw %xmm1, %ymm1
; X32-NEXT: vpmovsxbw %xmm0, %ymm0
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -157,7 +157,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i
; X32-NEXT: retl
;
; X64-LABEL: mul_v16i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxbw %xmm1, %ymm1
; X64-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -174,7 +174,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i
define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: mul_v32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextracti128 $1, %ymm1, %xmm2
; X32-NEXT: vpmovsxbw %xmm2, %ymm2
; X32-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -196,7 +196,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i
; X32-NEXT: retl
;
; X64-LABEL: mul_v32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextracti128 $1, %ymm1, %xmm2
; X64-NEXT: vpmovsxbw %xmm2, %ymm2
; X64-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -222,7 +222,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i
define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: mul_v4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlq $32, %ymm0, %ymm2
; X32-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; X32-NEXT: vpsrlq $32, %ymm1, %ymm3
@@ -234,7 +234,7 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i
; X32-NEXT: retl
;
; X64-LABEL: mul_v4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlq $32, %ymm0, %ymm2
; X64-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; X64-NEXT: vpsrlq $32, %ymm1, %ymm3
@@ -250,12 +250,12 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i
define <8 x i32> @mul_const1(<8 x i32> %x) {
; X32-LABEL: mul_const1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -264,12 +264,12 @@ define <8 x i32> @mul_const1(<8 x i32> %
define <4 x i64> @mul_const2(<4 x i64> %x) {
; X32-LABEL: mul_const2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <4 x i64> %x, <i64 4, i64 4, i64 4, i64 4>
@@ -278,12 +278,12 @@ define <4 x i64> @mul_const2(<4 x i64> %
define <16 x i16> @mul_const3(<16 x i16> %x) {
; X32-LABEL: mul_const3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $3, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $3, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <16 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -292,13 +292,13 @@ define <16 x i16> @mul_const3(<16 x i16>
define <4 x i64> @mul_const4(<4 x i64> %x) {
; X32-LABEL: mul_const4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpsubq %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpsubq %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -308,12 +308,12 @@ define <4 x i64> @mul_const4(<4 x i64> %
define <8 x i32> @mul_const5(<8 x i32> %x) {
; X32-LABEL: mul_const5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -322,12 +322,12 @@ define <8 x i32> @mul_const5(<8 x i32> %
define <8 x i32> @mul_const6(<8 x i32> %x) {
; X32-LABEL: mul_const6:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmulld {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const6:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmulld {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 2, i32 0, i32 2, i32 0, i32 0>
@@ -336,13 +336,13 @@ define <8 x i32> @mul_const6(<8 x i32> %
define <8 x i64> @mul_const7(<8 x i64> %x) {
; X32-LABEL: mul_const7:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X32-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; X32-NEXT: retl
;
; X64-LABEL: mul_const7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X64-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; X64-NEXT: retq
@@ -352,12 +352,12 @@ define <8 x i64> @mul_const7(<8 x i64> %
define <8 x i16> @mul_const8(<8 x i16> %x) {
; X32-LABEL: mul_const8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $3, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $3, %xmm0, %xmm0
; X64-NEXT: retq
%y = mul <8 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -366,14 +366,14 @@ define <8 x i16> @mul_const8(<8 x i16> %
define <8 x i32> @mul_const9(<8 x i32> %x) {
; X32-LABEL: mul_const9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl $2, %eax
; X32-NEXT: vmovd %eax, %xmm1
; X32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: movl $2, %eax
; X64-NEXT: vmovd %eax, %xmm1
; X64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
@@ -385,13 +385,13 @@ define <8 x i32> @mul_const9(<8 x i32> %
; %x * 0x01010101
define <4 x i32> @mul_const10(<4 x i32> %x) {
; X32-LABEL: mul_const10:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16843009,16843009,16843009,16843009]
; X32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const10:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16843009,16843009,16843009,16843009]
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
@@ -402,13 +402,13 @@ define <4 x i32> @mul_const10(<4 x i32>
; %x * 0x80808080
define <4 x i32> @mul_const11(<4 x i32> %x) {
; X32-LABEL: mul_const11:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2155905152,2155905152,2155905152,2155905152]
; X32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const11:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2155905152,2155905152,2155905152,2155905152]
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/avx2-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-cmp.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <8 x i32> @v8i32_cmpgt(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: v8i32_cmpgt:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v8i32_cmpgt:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <8 x i32> %i, %j
@@ -19,12 +19,12 @@ define <8 x i32> @v8i32_cmpgt(<8 x i32>
define <4 x i64> @v4i64_cmpgt(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: v4i64_cmpgt:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v4i64_cmpgt:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <4 x i64> %i, %j
@@ -34,12 +34,12 @@ define <4 x i64> @v4i64_cmpgt(<4 x i64>
define <16 x i16> @v16i16_cmpgt(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: v16i16_cmpgt:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v16i16_cmpgt:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <16 x i16> %i, %j
@@ -49,12 +49,12 @@ define <16 x i16> @v16i16_cmpgt(<16 x i1
define <32 x i8> @v32i8_cmpgt(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: v32i8_cmpgt:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v32i8_cmpgt:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <32 x i8> %i, %j
@@ -64,12 +64,12 @@ define <32 x i8> @v32i8_cmpgt(<32 x i8>
define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: int256_cmpeq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: int256_cmpeq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <8 x i32> %i, %j
@@ -79,12 +79,12 @@ define <8 x i32> @int256_cmpeq(<8 x i32>
define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: v4i64_cmpeq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v4i64_cmpeq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <4 x i64> %i, %j
@@ -94,12 +94,12 @@ define <4 x i64> @v4i64_cmpeq(<4 x i64>
define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: v16i16_cmpeq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v16i16_cmpeq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <16 x i16> %i, %j
@@ -109,12 +109,12 @@ define <16 x i16> @v16i16_cmpeq(<16 x i1
define <32 x i8> @v32i8_cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: v32i8_cmpeq:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v32i8_cmpeq:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <32 x i8> %i, %j
Modified: llvm/trunk/test/CodeGen/X86/avx2-conversions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-conversions.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-conversions.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-conversions.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
; X32-LABEL: trunc4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -12,7 +12,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) n
; X32-NEXT: retl
;
; X64-LABEL: trunc4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -24,7 +24,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) n
define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
; X32-LABEL: trunc8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -32,7 +32,7 @@ define <8 x i16> @trunc8(<8 x i32> %A) n
; X32-NEXT: retl
;
; X64-LABEL: trunc8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -44,12 +44,12 @@ define <8 x i16> @trunc8(<8 x i32> %A) n
define <4 x i64> @sext4(<4 x i32> %A) nounwind {
; X32-LABEL: sext4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovsxdq %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: retq
%B = sext <4 x i32> %A to <4 x i64>
@@ -58,12 +58,12 @@ define <4 x i64> @sext4(<4 x i32> %A) no
define <8 x i32> @sext8(<8 x i16> %A) nounwind {
; X32-LABEL: sext8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: retq
%B = sext <8 x i16> %A to <8 x i32>
@@ -72,12 +72,12 @@ define <8 x i32> @sext8(<8 x i16> %A) no
define <4 x i64> @zext4(<4 x i32> %A) nounwind {
; X32-LABEL: zext4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: retl
;
; X64-LABEL: zext4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: retq
%B = zext <4 x i32> %A to <4 x i64>
@@ -86,12 +86,12 @@ define <4 x i64> @zext4(<4 x i32> %A) no
define <8 x i32> @zext8(<8 x i16> %A) nounwind {
; X32-LABEL: zext8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: zext8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
%B = zext <8 x i16> %A to <8 x i32>
@@ -100,13 +100,13 @@ define <8 x i32> @zext8(<8 x i16> %A) no
define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
; X32-LABEL: zext_8i8_8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: zext_8i8_8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
@@ -116,12 +116,12 @@ define <8 x i32> @zext_8i8_8i32(<8 x i8>
define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: zext_16i8_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X32-NEXT: retl
;
; X64-LABEL: zext_16i8_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X64-NEXT: retq
%t = zext <16 x i8> %z to <16 x i16>
@@ -130,12 +130,12 @@ define <16 x i16> @zext_16i8_16i16(<16 x
define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: sext_16i8_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovsxbw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_16i8_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-NEXT: retq
%t = sext <16 x i8> %z to <16 x i16>
@@ -144,7 +144,7 @@ define <16 x i16> @sext_16i8_16i16(<16 x
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-LABEL: trunc_16i16_16i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X32-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -154,7 +154,7 @@ define <16 x i8> @trunc_16i16_16i8(<16 x
; X32-NEXT: retl
;
; X64-LABEL: trunc_16i16_16i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X64-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -168,13 +168,13 @@ define <16 x i8> @trunc_16i16_16i8(<16 x
define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
; X32-LABEL: load_sext_test1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxdq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxdq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i32>, <4 x i32>* %ptr
@@ -184,13 +184,13 @@ define <4 x i64> @load_sext_test1(<4 x i
define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
; X32-LABEL: load_sext_test2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxbq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i8>, <4 x i8>* %ptr
@@ -200,13 +200,13 @@ define <4 x i64> @load_sext_test2(<4 x i
define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
; X32-LABEL: load_sext_test3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxwq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i16>, <4 x i16>* %ptr
@@ -216,13 +216,13 @@ define <4 x i64> @load_sext_test3(<4 x i
define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
; X32-LABEL: load_sext_test4:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test4:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxwd (%rdi), %ymm0
; X64-NEXT: retq
%X = load <8 x i16>, <8 x i16>* %ptr
@@ -232,13 +232,13 @@ define <8 x i32> @load_sext_test4(<8 x i
define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
; X32-LABEL: load_sext_test5:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test5:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovsxbd (%rdi), %ymm0
; X64-NEXT: retq
%X = load <8 x i8>, <8 x i8>* %ptr
Modified: llvm/trunk/test/CodeGen/X86/avx2-fma-fneg-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-fma-fneg-combine.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-fma-fneg-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-fma-fneg-combine.ll Mon Dec 4 09:18:51 2017
@@ -6,12 +6,12 @@
define <8 x float> @test1(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X64-NEXT: retq
entry:
@@ -24,12 +24,12 @@ declare <8 x float> @llvm.x86.fma.vfmadd
define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; X32-LABEL: test2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; X64-NEXT: retq
entry:
@@ -42,14 +42,14 @@ declare <4 x float> @llvm.x86.fma.vfmadd
define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; X32-LABEL: test3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
; X32-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm1
; X32-NEXT: vxorps %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
; X64-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; X64-NEXT: vxorps %xmm1, %xmm0, %xmm0
@@ -64,12 +64,12 @@ declare <4 x float> @llvm.x86.fma.vfnmad
define <8 x float> @test4(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test4:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; X64-NEXT: retq
entry:
@@ -80,14 +80,14 @@ entry:
define <8 x float> @test5(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test5:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vbroadcastss {{\.LCPI.*}}, %ymm3
; X32-NEXT: vxorps %ymm3, %ymm2, %ymm2
; X32-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test5:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vbroadcastss {{.*}}(%rip), %ymm3
; X64-NEXT: vxorps %ymm3, %ymm2, %ymm2
; X64-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
@@ -103,12 +103,12 @@ declare <8 x float> @llvm.x86.fma.vfmsub
define <2 x double> @test6(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; X32-LABEL: test6:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test6:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; X64-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/avx2-gather.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-gather.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-gather.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-gather.ll Mon Dec 4 09:18:51 2017
@@ -7,7 +7,7 @@ declare <4 x float> @llvm.x86.avx2.gathe
define <4 x float> @test_x86_avx2_gather_d_ps(i8* %a1, <4 x i32> %idx, <4 x float> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_ps:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
@@ -15,7 +15,7 @@ define <4 x float> @test_x86_avx2_gather
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdps %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT: vmovaps %xmm2, %xmm0
@@ -30,7 +30,7 @@ declare <2 x double> @llvm.x86.avx2.gath
define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1, <4 x i32> %idx, <2 x double> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
@@ -38,7 +38,7 @@ define <2 x double> @test_x86_avx2_gathe
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdpd %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT: vmovapd %xmm2, %xmm0
@@ -53,7 +53,7 @@ declare <8 x float> @llvm.x86.avx2.gathe
define <8 x float> @test_x86_avx2_gather_d_ps_256(i8* %a1, <8 x i32> %idx, <8 x float> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_ps_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
@@ -61,7 +61,7 @@ define <8 x float> @test_x86_avx2_gather
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdps %ymm1, (%rdi,%ymm0,4), %ymm2
; X64-NEXT: vmovaps %ymm2, %ymm0
@@ -76,7 +76,7 @@ declare <4 x double> @llvm.x86.avx2.gath
define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x double> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_pd_256:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
@@ -84,7 +84,7 @@ define <4 x double> @test_x86_avx2_gathe
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdpd %ymm1, (%rdi,%xmm0,8), %ymm2
; X64-NEXT: vmovapd %ymm2, %ymm0
@@ -96,7 +96,7 @@ define <4 x double> @test_x86_avx2_gathe
define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_epi32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -105,7 +105,7 @@ define <2 x i64> @test_mm_i32gather_epi3
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -122,7 +122,7 @@ declare <4 x i32> @llvm.x86.avx2.gather.
define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_pd:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -131,7 +131,7 @@ define <2 x double> @test_mm_i32gather_p
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
Modified: llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define <4 x i64> @test_mm256_abs_epi8(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_abs_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpabsb %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg = bitcast <4 x i64> %a0 to <32 x i8>
@@ -20,7 +20,7 @@ declare <32 x i8> @llvm.x86.avx2.pabs.b(
define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_abs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpabsw %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg = bitcast <4 x i64> %a0 to <16 x i16>
@@ -34,7 +34,7 @@ declare <16 x i16> @llvm.x86.avx2.pabs.w
define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_abs_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpabsd %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg = bitcast <4 x i64> %a0 to <8 x i32>
@@ -48,7 +48,7 @@ declare <8 x i32> @llvm.x86.avx2.pabs.d(
define <4 x i64> @test_mm256_add_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_add_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -60,7 +60,7 @@ define <4 x i64> @test_mm256_add_epi8(<4
define <4 x i64> @test_mm256_add_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_add_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -72,7 +72,7 @@ define <4 x i64> @test_mm256_add_epi16(<
define <4 x i64> @test_mm256_add_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_add_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -84,7 +84,7 @@ define <4 x i64> @test_mm256_add_epi32(<
define <4 x i64> @test_mm256_add_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_add_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = add <4 x i64> %a0, %a1
@@ -93,7 +93,7 @@ define <4 x i64> @test_mm256_add_epi64(<
define <4 x i64> @test_mm256_adds_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_adds_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -106,7 +106,7 @@ declare <32 x i8> @llvm.x86.avx2.padds.b
define <4 x i64> @test_mm256_adds_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_adds_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -119,7 +119,7 @@ declare <16 x i16> @llvm.x86.avx2.padds.
define <4 x i64> @test_mm256_adds_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_adds_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddusb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -132,7 +132,7 @@ declare <32 x i8> @llvm.x86.avx2.paddus.
define <4 x i64> @test_mm256_adds_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_adds_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddusw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -145,7 +145,7 @@ declare <16 x i16> @llvm.x86.avx2.paddus
define <4 x i64> @test_mm256_alignr_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_alignr_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1],ymm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -157,7 +157,7 @@ define <4 x i64> @test_mm256_alignr_epi8
define <4 x i64> @test2_mm256_alignr_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test2_mm256_alignr_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0],ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -169,7 +169,7 @@ define <4 x i64> @test2_mm256_alignr_epi
define <4 x i64> @test_mm256_and_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_and_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vandps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = and <4 x i64> %a0, %a1
@@ -178,7 +178,7 @@ define <4 x i64> @test_mm256_and_si256(<
define <4 x i64> @test_mm256_andnot_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_andnot_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpxor %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -190,7 +190,7 @@ define <4 x i64> @test_mm256_andnot_si25
define <4 x i64> @test_mm256_avg_epu8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_avg_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpavgb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -207,7 +207,7 @@ define <4 x i64> @test_mm256_avg_epu8(<4
define <4 x i64> @test_mm256_avg_epu16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_avg_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpavgw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -224,7 +224,7 @@ define <4 x i64> @test_mm256_avg_epu16(<
define <4 x i64> @test_mm256_blend_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_blend_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7,8],ymm1[9],ymm0[10,11,12,13,14,15]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -236,7 +236,7 @@ define <4 x i64> @test_mm256_blend_epi16
define <2 x i64> @test_mm_blend_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_blend_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -248,7 +248,7 @@ define <2 x i64> @test_mm_blend_epi32(<2
define <4 x i64> @test_mm256_blend_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_blend_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -260,7 +260,7 @@ define <4 x i64> @test_mm256_blend_epi32
define <4 x i64> @test_mm256_blendv_epi8(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
; CHECK-LABEL: test_mm256_blendv_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -274,7 +274,7 @@ declare <32 x i8> @llvm.x86.avx2.pblendv
define <2 x i64> @test_mm_broadcastb_epi8(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastb_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -285,7 +285,7 @@ define <2 x i64> @test_mm_broadcastb_epi
define <4 x i64> @test_mm256_broadcastb_epi8(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastb_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -296,7 +296,7 @@ define <4 x i64> @test_mm256_broadcastb_
define <2 x i64> @test_mm_broadcastd_epi32(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastd_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -307,7 +307,7 @@ define <2 x i64> @test_mm_broadcastd_epi
define <4 x i64> @test_mm256_broadcastd_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastd_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -318,7 +318,7 @@ define <4 x i64> @test_mm256_broadcastd_
define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastq_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -327,7 +327,7 @@ define <2 x i64> @test_mm_broadcastq_epi
define <4 x i64> @test_mm256_broadcastq_epi64(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastq_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -336,7 +336,7 @@ define <4 x i64> @test_mm256_broadcastq_
define <2 x double> @test_mm_broadcastsd_pd(<2 x double> %a0) {
; CHECK-LABEL: test_mm_broadcastsd_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
@@ -345,7 +345,7 @@ define <2 x double> @test_mm_broadcastsd
define <4 x double> @test_mm256_broadcastsd_pd(<4 x double> %a0) {
; CHECK-LABEL: test_mm256_broadcastsd_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> zeroinitializer
@@ -354,7 +354,7 @@ define <4 x double> @test_mm256_broadcas
define <4 x i64> @test_mm256_broadcastsi128_si256(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastsi128_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
@@ -364,13 +364,13 @@ define <4 x i64> @test_mm256_broadcastsi
define <4 x i64> @test_mm256_broadcastsi128_si256_mem(<2 x i64>* %p0) {
; X86-LABEL: test_mm256_broadcastsi128_si256_mem:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_broadcastsi128_si256_mem:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: ret{{[l|q]}}
%a0 = load <2 x i64>, <2 x i64>* %p0
@@ -380,7 +380,7 @@ define <4 x i64> @test_mm256_broadcastsi
define <4 x float> @test_mm_broadcastss_ps(<4 x float> %a0) {
; CHECK-LABEL: test_mm_broadcastss_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> zeroinitializer
@@ -389,7 +389,7 @@ define <4 x float> @test_mm_broadcastss_
define <8 x float> @test_mm256_broadcastss_ps(<8 x float> %a0) {
; CHECK-LABEL: test_mm256_broadcastss_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> zeroinitializer
@@ -398,7 +398,7 @@ define <8 x float> @test_mm256_broadcast
define <2 x i64> @test_mm_broadcastw_epi16(<2 x i64> %a0) {
; CHECK-LABEL: test_mm_broadcastw_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -409,7 +409,7 @@ define <2 x i64> @test_mm_broadcastw_epi
define <4 x i64> @test_mm256_broadcastw_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastw_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -420,7 +420,7 @@ define <4 x i64> @test_mm256_broadcastw_
define <4 x i64> @test_mm256_bslli_epi128(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_bslli_epi128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -431,7 +431,7 @@ define <4 x i64> @test_mm256_bslli_epi12
define <4 x i64> @test_mm256_bsrli_epi128(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_bsrli_epi128:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -442,7 +442,7 @@ define <4 x i64> @test_mm256_bsrli_epi12
define <4 x i64> @test_mm256_cmpeq_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpeq_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -455,7 +455,7 @@ define <4 x i64> @test_mm256_cmpeq_epi8(
define <4 x i64> @test_mm256_cmpeq_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpeq_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -468,7 +468,7 @@ define <4 x i64> @test_mm256_cmpeq_epi16
define <4 x i64> @test_mm256_cmpeq_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpeq_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -481,7 +481,7 @@ define <4 x i64> @test_mm256_cmpeq_epi32
define <4 x i64> @test_mm256_cmpeq_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpeq_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%cmp = icmp eq <4 x i64> %a0, %a1
@@ -491,7 +491,7 @@ define <4 x i64> @test_mm256_cmpeq_epi64
define <4 x i64> @test_mm256_cmpgt_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpgt_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -504,7 +504,7 @@ define <4 x i64> @test_mm256_cmpgt_epi8(
define <4 x i64> @test_mm256_cmpgt_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpgt_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -517,7 +517,7 @@ define <4 x i64> @test_mm256_cmpgt_epi16
define <4 x i64> @test_mm256_cmpgt_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpgt_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -530,7 +530,7 @@ define <4 x i64> @test_mm256_cmpgt_epi32
define <4 x i64> @test_mm256_cmpgt_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_cmpgt_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%cmp = icmp sgt <4 x i64> %a0, %a1
@@ -540,7 +540,7 @@ define <4 x i64> @test_mm256_cmpgt_epi64
define <4 x i64> @test_mm256_cvtepi8_epi16(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi8_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxbw %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -551,7 +551,7 @@ define <4 x i64> @test_mm256_cvtepi8_epi
define <4 x i64> @test_mm256_cvtepi8_epi32(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi8_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxbd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -563,7 +563,7 @@ define <4 x i64> @test_mm256_cvtepi8_epi
define <4 x i64> @test_mm256_cvtepi8_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi8_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxbq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -574,7 +574,7 @@ define <4 x i64> @test_mm256_cvtepi8_epi
define <4 x i64> @test_mm256_cvtepi16_epi32(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi16_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxwd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -585,7 +585,7 @@ define <4 x i64> @test_mm256_cvtepi16_ep
define <4 x i64> @test_mm256_cvtepi16_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi16_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxwq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -596,7 +596,7 @@ define <4 x i64> @test_mm256_cvtepi16_ep
define <4 x i64> @test_mm256_cvtepi32_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepi32_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovsxdq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -606,7 +606,7 @@ define <4 x i64> @test_mm256_cvtepi32_ep
define <4 x i64> @test_mm256_cvtepu8_epi16(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu8_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -617,7 +617,7 @@ define <4 x i64> @test_mm256_cvtepu8_epi
define <4 x i64> @test_mm256_cvtepu8_epi32(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu8_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -629,7 +629,7 @@ define <4 x i64> @test_mm256_cvtepu8_epi
define <4 x i64> @test_mm256_cvtepu8_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu8_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
@@ -640,7 +640,7 @@ define <4 x i64> @test_mm256_cvtepu8_epi
define <4 x i64> @test_mm256_cvtepu16_epi32(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu16_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -651,7 +651,7 @@ define <4 x i64> @test_mm256_cvtepu16_ep
define <4 x i64> @test_mm256_cvtepu16_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu16_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
@@ -662,7 +662,7 @@ define <4 x i64> @test_mm256_cvtepu16_ep
define <4 x i64> @test_mm256_cvtepu32_epi64(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_cvtepu32_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -672,7 +672,7 @@ define <4 x i64> @test_mm256_cvtepu32_ep
define <2 x i64> @test_mm256_extracti128_si256(<4 x i64> %a0) nounwind {
; CHECK-LABEL: test_mm256_extracti128_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
@@ -682,7 +682,7 @@ define <2 x i64> @test_mm256_extracti128
define <4 x i64> @test_mm256_hadd_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hadd_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -695,7 +695,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.
define <4 x i64> @test_mm256_hadd_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hadd_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -708,7 +708,7 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d
define <4 x i64> @test_mm256_hadds_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hadds_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphaddsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -721,7 +721,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.
define <4 x i64> @test_mm256_hsub_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hsub_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -734,7 +734,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.
define <4 x i64> @test_mm256_hsub_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hsub_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -747,7 +747,7 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d
define <4 x i64> @test_mm256_hsubs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_hsubs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vphsubsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -760,7 +760,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.
define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i32gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -769,7 +769,7 @@ define <2 x i64> @test_mm_i32gather_epi3
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i32gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -786,13 +786,13 @@ declare <4 x i32> @llvm.x86.avx2.gather.
define <2 x i64> @test_mm_mask_i32gather_epi32(<2 x i64> %a0, i32 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X86-LABEL: test_mm_mask_i32gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherdd %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i32gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -806,7 +806,7 @@ define <2 x i64> @test_mm_mask_i32gather
define <4 x i64> @test_mm256_i32gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i32gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -815,7 +815,7 @@ define <4 x i64> @test_mm256_i32gather_e
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i32gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdd %ymm2, (%rdi,%ymm0,2), %ymm1
@@ -832,13 +832,13 @@ declare <8 x i32> @llvm.x86.avx2.gather.
define <4 x i64> @test_mm256_mask_i32gather_epi32(<4 x i64> %a0, i32 *%a1, <4 x i64> %a2, <4 x i64> %a3) {
; X86-LABEL: test_mm256_mask_i32gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherdd %ymm2, (%eax,%ymm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i32gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -852,7 +852,7 @@ define <4 x i64> @test_mm256_mask_i32gat
define <2 x i64> @test_mm_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i32gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -861,7 +861,7 @@ define <2 x i64> @test_mm_i32gather_epi6
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i32gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdq %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -876,13 +876,13 @@ declare <2 x i64> @llvm.x86.avx2.gather.
define <2 x i64> @test_mm_mask_i32gather_epi64(<2 x i64> %a0, i64 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X86-LABEL: test_mm_mask_i32gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherdq %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i32gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast i64 *%a1 to i8*
@@ -893,7 +893,7 @@ define <2 x i64> @test_mm_mask_i32gather
define <4 x i64> @test_mm256_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm256_i32gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -902,7 +902,7 @@ define <4 x i64> @test_mm256_i32gather_e
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i32gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdq %ymm2, (%rdi,%xmm0,2), %ymm1
@@ -917,13 +917,13 @@ declare <4 x i64> @llvm.x86.avx2.gather.
define <4 x i64> @test_mm256_mask_i32gather_epi64(<4 x i64> %a0, i64 *%a1, <2 x i64> %a2, <4 x i64> %a3) {
; X86-LABEL: test_mm256_mask_i32gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherdq %ymm2, (%eax,%xmm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i32gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast i64 *%a1 to i8*
@@ -934,7 +934,7 @@ define <4 x i64> @test_mm256_mask_i32gat
define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i32gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -943,7 +943,7 @@ define <2 x double> @test_mm_i32gather_p
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i32gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -961,13 +961,13 @@ declare <2 x double> @llvm.x86.avx2.gath
define <2 x double> @test_mm_mask_i32gather_pd(<2 x double> %a0, double *%a1, <2 x i64> %a2, <2 x double> %a3) {
; X86-LABEL: test_mm_mask_i32gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherdpd %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i32gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast double *%a1 to i8*
@@ -978,7 +978,7 @@ define <2 x double> @test_mm_mask_i32gat
define <4 x double> @test_mm256_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm256_i32gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmpeqpd %ymm1, %ymm1, %ymm2
@@ -987,7 +987,7 @@ define <4 x double> @test_mm256_i32gathe
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i32gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqpd %ymm1, %ymm1, %ymm2
; X64-NEXT: vgatherdpd %ymm2, (%rdi,%xmm0,2), %ymm1
@@ -1003,13 +1003,13 @@ declare <4 x double> @llvm.x86.avx2.gath
define <4 x double> @test_mm256_mask_i32gather_pd(<4 x double> %a0, double *%a1, <2 x i64> %a2, <4 x double> %a3) {
; X86-LABEL: test_mm256_mask_i32gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherdpd %ymm2, (%eax,%xmm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i32gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast double *%a1 to i8*
@@ -1020,7 +1020,7 @@ define <4 x double> @test_mm256_mask_i32
define <4 x float> @test_mm_i32gather_ps(float *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i32gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -1029,7 +1029,7 @@ define <4 x float> @test_mm_i32gather_ps
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i32gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherdps %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -1047,13 +1047,13 @@ declare <4 x float> @llvm.x86.avx2.gathe
define <4 x float> @test_mm_mask_i32gather_ps(<4 x float> %a0, float *%a1, <2 x i64> %a2, <4 x float> %a3) {
; X86-LABEL: test_mm_mask_i32gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherdps %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i32gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast float *%a1 to i8*
@@ -1064,7 +1064,7 @@ define <4 x float> @test_mm_mask_i32gath
define <8 x float> @test_mm256_i32gather_ps(float *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i32gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmpeqps %ymm1, %ymm1, %ymm2
@@ -1073,7 +1073,7 @@ define <8 x float> @test_mm256_i32gather
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i32gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqps %ymm1, %ymm1, %ymm2
; X64-NEXT: vgatherdps %ymm2, (%rdi,%ymm0,2), %ymm1
@@ -1089,13 +1089,13 @@ declare <8 x float> @llvm.x86.avx2.gathe
define <8 x float> @test_mm256_mask_i32gather_ps(<8 x float> %a0, float *%a1, <4 x i64> %a2, <8 x float> %a3) {
; X86-LABEL: test_mm256_mask_i32gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherdps %ymm2, (%eax,%ymm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i32gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast float *%a1 to i8*
@@ -1106,7 +1106,7 @@ define <8 x float> @test_mm256_mask_i32g
define <2 x i64> @test_mm_i64gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i64gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -1115,7 +1115,7 @@ define <2 x i64> @test_mm_i64gather_epi3
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i64gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -1131,13 +1131,13 @@ declare <4 x i32> @llvm.x86.avx2.gather.
define <2 x i64> @test_mm_mask_i64gather_epi32(<2 x i64> %a0, i32 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X86-LABEL: test_mm_mask_i64gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherqd %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i64gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -1150,7 +1150,7 @@ define <2 x i64> @test_mm_mask_i64gather
define <2 x i64> @test_mm256_i64gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i64gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -1160,7 +1160,7 @@ define <2 x i64> @test_mm256_i64gather_e
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i64gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%ymm0,2), %xmm1
@@ -1177,14 +1177,14 @@ declare <4 x i32> @llvm.x86.avx2.gather.
define <2 x i64> @test_mm256_mask_i64gather_epi32(<2 x i64> %a0, i32 *%a1, <4 x i64> %a2, <2 x i64> %a3) {
; X86-LABEL: test_mm256_mask_i64gather_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i64gather_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: ret{{[l|q]}}
@@ -1198,7 +1198,7 @@ define <2 x i64> @test_mm256_mask_i64gat
define <2 x i64> @test_mm_i64gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i64gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -1207,7 +1207,7 @@ define <2 x i64> @test_mm_i64gather_epi6
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i64gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqq %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -1221,13 +1221,13 @@ declare <2 x i64> @llvm.x86.avx2.gather.
define <2 x i64> @test_mm_mask_i64gather_epi64(<2 x i64> %a0, i64 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X86-LABEL: test_mm_mask_i64gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherqq %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i64gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast i64 *%a1 to i8*
@@ -1237,7 +1237,7 @@ define <2 x i64> @test_mm_mask_i64gather
define <4 x i64> @test_mm256_i64gather_epi64(i64 *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i64gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -1246,7 +1246,7 @@ define <4 x i64> @test_mm256_i64gather_e
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i64gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm0,2), %ymm1
@@ -1260,13 +1260,13 @@ declare <4 x i64> @llvm.x86.avx2.gather.
define <4 x i64> @test_mm256_mask_i64gather_epi64(<4 x i64> %a0, i64 *%a1, <4 x i64> %a2, <4 x i64> %a3) {
; X86-LABEL: test_mm256_mask_i64gather_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpgatherqq %ymm2, (%eax,%ymm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i64gather_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast i64 *%a1 to i8*
@@ -1276,7 +1276,7 @@ define <4 x i64> @test_mm256_mask_i64gat
define <2 x double> @test_mm_i64gather_pd(double *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i64gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -1285,7 +1285,7 @@ define <2 x double> @test_mm_i64gather_p
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i64gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -1302,13 +1302,13 @@ declare <2 x double> @llvm.x86.avx2.gath
define <2 x double> @test_mm_mask_i64gather_pd(<2 x double> %a0, double *%a1, <2 x i64> %a2, <2 x double> %a3) {
; X86-LABEL: test_mm_mask_i64gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherqpd %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i64gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast double *%a1 to i8*
@@ -1318,7 +1318,7 @@ define <2 x double> @test_mm_mask_i64gat
define <4 x double> @test_mm256_i64gather_pd(double *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i64gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vcmpeqpd %ymm1, %ymm1, %ymm2
@@ -1327,7 +1327,7 @@ define <4 x double> @test_mm256_i64gathe
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i64gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vcmpeqpd %ymm1, %ymm1, %ymm2
; X64-NEXT: vgatherqpd %ymm2, (%rdi,%ymm0,2), %ymm1
@@ -1342,13 +1342,13 @@ declare <4 x double> @llvm.x86.avx2.gath
define <4 x double> @test_mm256_mask_i64gather_pd(<4 x double> %a0, i64 *%a1, <4 x i64> %a2, <4 x double> %a3) {
; X86-LABEL: test_mm256_mask_i64gather_pd:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherqpd %ymm2, (%eax,%ymm1,2), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i64gather_pd:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast i64 *%a1 to i8*
@@ -1358,7 +1358,7 @@ define <4 x double> @test_mm256_mask_i64
define <4 x float> @test_mm_i64gather_ps(float *%a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i64gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -1367,7 +1367,7 @@ define <4 x float> @test_mm_i64gather_ps
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_i64gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -1384,13 +1384,13 @@ declare <4 x float> @llvm.x86.avx2.gathe
define <4 x float> @test_mm_mask_i64gather_ps(<4 x float> %a0, float *%a1, <2 x i64> %a2, <4 x float> %a3) {
; X86-LABEL: test_mm_mask_i64gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherqps %xmm2, (%eax,%xmm1,2), %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_mask_i64gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg1 = bitcast float *%a1 to i8*
@@ -1400,7 +1400,7 @@ define <4 x float> @test_mm_mask_i64gath
define <4 x float> @test_mm256_i64gather_ps(float *%a0, <4 x i64> %a1) {
; X86-LABEL: test_mm256_i64gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -1410,7 +1410,7 @@ define <4 x float> @test_mm256_i64gather
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_i64gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm0,2), %xmm1
@@ -1428,14 +1428,14 @@ declare <4 x float> @llvm.x86.avx2.gathe
define <4 x float> @test_mm256_mask_i64gather_ps(<4 x float> %a0, float *%a1, <4 x i64> %a2, <4 x float> %a3) {
; X86-LABEL: test_mm256_mask_i64gather_ps:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_mask_i64gather_ps:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,2), %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: ret{{[l|q]}}
@@ -1446,7 +1446,7 @@ define <4 x float> @test_mm256_mask_i64g
define <4 x i64> @test0_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; CHECK-LABEL: test0_mm256_inserti128_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
@@ -1457,7 +1457,7 @@ define <4 x i64> @test0_mm256_inserti128
define <4 x i64> @test1_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; CHECK-LABEL: test1_mm256_inserti128_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -1467,7 +1467,7 @@ define <4 x i64> @test1_mm256_inserti128
define <4 x i64> @test_mm256_madd_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_madd_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1480,7 +1480,7 @@ declare <8 x i32> @llvm.x86.avx2.pmadd.w
define <4 x i64> @test_mm256_maddubs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_maddubs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1493,13 +1493,13 @@ declare <16 x i16> @llvm.x86.avx2.pmadd.
define <2 x i64> @test_mm_maskload_epi32(i32* %a0, <2 x i64> %a1) nounwind {
; X86-LABEL: test_mm_maskload_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovd (%eax), %xmm0, %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_maskload_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast i32* %a0 to i8*
@@ -1512,13 +1512,13 @@ declare <4 x i32> @llvm.x86.avx2.maskloa
define <4 x i64> @test_mm256_maskload_epi32(i32* %a0, <4 x i64> %a1) nounwind {
; X86-LABEL: test_mm256_maskload_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovd (%eax), %ymm0, %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_maskload_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast i32* %a0 to i8*
@@ -1531,13 +1531,13 @@ declare <8 x i32> @llvm.x86.avx2.maskloa
define <2 x i64> @test_mm_maskload_epi64(i64* %a0, <2 x i64> %a1) nounwind {
; X86-LABEL: test_mm_maskload_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovq (%eax), %xmm0, %xmm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_maskload_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast i64* %a0 to i8*
@@ -1548,13 +1548,13 @@ declare <2 x i64> @llvm.x86.avx2.maskloa
define <4 x i64> @test_mm256_maskload_epi64(i64* %a0, <4 x i64> %a1) nounwind {
; X86-LABEL: test_mm256_maskload_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovq (%eax), %ymm0, %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_maskload_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast i64* %a0 to i8*
@@ -1565,13 +1565,13 @@ declare <4 x i64> @llvm.x86.avx2.maskloa
define void @test_mm_maskstore_epi32(float* %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X86-LABEL: test_mm_maskstore_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovd %xmm1, %xmm0, (%eax)
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_maskstore_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast float* %a0 to i8*
@@ -1584,14 +1584,14 @@ declare void @llvm.x86.avx2.maskstore.d(
define void @test_mm256_maskstore_epi32(float* %a0, <4 x i64> %a1, <4 x i64> %a2) nounwind {
; X86-LABEL: test_mm256_maskstore_epi32:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovd %ymm1, %ymm0, (%eax)
; X86-NEXT: vzeroupper
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_maskstore_epi32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: ret{{[l|q]}}
@@ -1605,13 +1605,13 @@ declare void @llvm.x86.avx2.maskstore.d.
define void @test_mm_maskstore_epi64(i64* %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X86-LABEL: test_mm_maskstore_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovq %xmm1, %xmm0, (%eax)
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm_maskstore_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi)
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast i64* %a0 to i8*
@@ -1622,14 +1622,14 @@ declare void @llvm.x86.avx2.maskstore.q(
define void @test_mm256_maskstore_epi64(i64* %a0, <4 x i64> %a1, <4 x i64> %a2) nounwind {
; X86-LABEL: test_mm256_maskstore_epi64:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaskmovq %ymm1, %ymm0, (%eax)
; X86-NEXT: vzeroupper
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_maskstore_epi64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: ret{{[l|q]}}
@@ -1641,7 +1641,7 @@ declare void @llvm.x86.avx2.maskstore.q.
define <4 x i64> @test_mm256_max_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1654,7 +1654,7 @@ define <4 x i64> @test_mm256_max_epi8(<4
define <4 x i64> @test_mm256_max_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1667,7 +1667,7 @@ define <4 x i64> @test_mm256_max_epi16(<
define <4 x i64> @test_mm256_max_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1680,7 +1680,7 @@ define <4 x i64> @test_mm256_max_epi32(<
define <4 x i64> @test_mm256_max_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1693,7 +1693,7 @@ define <4 x i64> @test_mm256_max_epu8(<4
define <4 x i64> @test_mm256_max_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1706,7 +1706,7 @@ define <4 x i64> @test_mm256_max_epu16(<
define <4 x i64> @test_mm256_max_epu32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_max_epu32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1719,7 +1719,7 @@ define <4 x i64> @test_mm256_max_epu32(<
define <4 x i64> @test_mm256_min_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1732,7 +1732,7 @@ define <4 x i64> @test_mm256_min_epi8(<4
define <4 x i64> @test_mm256_min_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1745,7 +1745,7 @@ define <4 x i64> @test_mm256_min_epi16(<
define <4 x i64> @test_mm256_min_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1758,7 +1758,7 @@ define <4 x i64> @test_mm256_min_epi32(<
define <4 x i64> @test_mm256_min_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminub %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1771,7 +1771,7 @@ define <4 x i64> @test_mm256_min_epu8(<4
define <4 x i64> @test_mm256_min_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1784,7 +1784,7 @@ define <4 x i64> @test_mm256_min_epu16(<
define <4 x i64> @test_mm256_min_epu32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_min_epu32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpminud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1797,7 +1797,7 @@ define <4 x i64> @test_mm256_min_epu32(<
define i32 @test_mm256_movemask_epi8(<4 x i64> %a0) nounwind {
; CHECK-LABEL: test_mm256_movemask_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmovmskb %ymm0, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
@@ -1809,7 +1809,7 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32
define <4 x i64> @test_mm256_mpsadbw_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mpsadbw_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vmpsadbw $3, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -1822,7 +1822,7 @@ declare <16 x i16> @llvm.x86.avx2.mpsadb
define <4 x i64> @test_mm256_mul_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mul_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1834,7 +1834,7 @@ declare <4 x i64> @llvm.x86.avx2.pmul.dq
define <4 x i64> @test_mm256_mul_epu32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mul_epu32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1846,7 +1846,7 @@ declare <4 x i64> @llvm.x86.avx2.pmulu.d
define <4 x i64> @test_mm256_mulhi_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mulhi_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulhw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1859,7 +1859,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.
define <4 x i64> @test_mm256_mulhi_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mulhi_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1872,7 +1872,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu
define <4 x i64> @test_mm256_mulhrs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mulhrs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1885,7 +1885,7 @@ declare <16 x i16> @llvm.x86.avx2.pmul.h
define <4 x i64> @test_mm256_mullo_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mullo_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1897,7 +1897,7 @@ define <4 x i64> @test_mm256_mullo_epi16
define <4 x i64> @test_mm256_mullo_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_mullo_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1909,7 +1909,7 @@ define <4 x i64> @test_mm256_mullo_epi32
define <4 x i64> @test_mm256_or_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_or_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = or <4 x i64> %a0, %a1
@@ -1918,7 +1918,7 @@ define <4 x i64> @test_mm256_or_si256(<4
define <4 x i64> @test_mm256_packs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_packs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1931,7 +1931,7 @@ declare <32 x i8> @llvm.x86.avx2.packssw
define <4 x i64> @test_mm256_packs_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_packs_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1944,7 +1944,7 @@ declare <16 x i16> @llvm.x86.avx2.packss
define <4 x i64> @test_mm256_packus_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_packus_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -1957,7 +1957,7 @@ declare <32 x i8> @llvm.x86.avx2.packusw
define <4 x i64> @test_mm256_packus_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_packus_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -1970,7 +1970,7 @@ declare <16 x i16> @llvm.x86.avx2.packus
define <4 x i64> @test_mm256_permute2x128_si256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_permute2x128_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
@@ -1980,7 +1980,7 @@ declare <4 x i64> @llvm.x86.avx2.vperm2i
define <4 x i64> @test_mm256_permute4x64_epi64(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_permute4x64_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,2,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 0>
@@ -1989,7 +1989,7 @@ define <4 x i64> @test_mm256_permute4x64
define <4 x double> @test_mm256_permute4x64_pd(<4 x double> %a0) {
; CHECK-LABEL: test_mm256_permute4x64_pd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,1,0]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 2, i32 1, i32 0>
@@ -1998,7 +1998,7 @@ define <4 x double> @test_mm256_permute4
define <4 x i64> @test_mm256_permutevar8x32_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_permutevar8x32_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2011,7 +2011,7 @@ declare <8 x i32> @llvm.x86.avx2.permd(<
define <8 x float> @test_mm256_permutevar8x32_ps(<8 x float> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_permutevar8x32_ps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
@@ -2022,7 +2022,7 @@ declare <8 x float> @llvm.x86.avx2.permp
define <4 x i64> @test_mm256_sad_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sad_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2034,7 +2034,7 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw
define <4 x i64> @test_mm256_shuffle_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_shuffle_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,3,0,0,7,7,4,4]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2045,7 +2045,7 @@ define <4 x i64> @test_mm256_shuffle_epi
define <4 x i64> @test_mm256_shuffle_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_shuffle_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2058,7 +2058,7 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b
define <4 x i64> @test_mm256_shufflehi_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_shufflehi_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,6,5,8,9,10,11,15,14,14,13]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2069,7 +2069,7 @@ define <4 x i64> @test_mm256_shufflehi_e
define <4 x i64> @test_mm256_shufflelo_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_shufflelo_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,0,1,1,4,5,6,7,11,8,9,9,12,13,14,15]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2080,7 +2080,7 @@ define <4 x i64> @test_mm256_shufflelo_e
define <4 x i64> @test_mm256_sign_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sign_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsignb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2093,7 +2093,7 @@ declare <32 x i8> @llvm.x86.avx2.psign.b
define <4 x i64> @test_mm256_sign_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sign_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsignw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2106,7 +2106,7 @@ declare <16 x i16> @llvm.x86.avx2.psign.
define <4 x i64> @test_mm256_sign_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sign_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsignd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2119,7 +2119,7 @@ declare <8 x i32> @llvm.x86.avx2.psign.d
define <4 x i64> @test_mm256_sll_epi16(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_sll_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2132,7 +2132,7 @@ declare <16 x i16> @llvm.x86.avx2.psll.w
define <4 x i64> @test_mm256_sll_epi32(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_sll_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2145,7 +2145,7 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(
define <4 x i64> @test_mm256_sll_epi64(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_sll_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1)
@@ -2155,7 +2155,7 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(
define <4 x i64> @test_mm256_slli_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_slli_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllw $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2167,7 +2167,7 @@ declare <16 x i16> @llvm.x86.avx2.pslli.
define <4 x i64> @test_mm256_slli_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_slli_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslld $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2179,7 +2179,7 @@ declare <8 x i32> @llvm.x86.avx2.pslli.d
define <4 x i64> @test_mm256_slli_epi64(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_slli_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllq $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %a0, i32 3)
@@ -2189,7 +2189,7 @@ declare <4 x i64> @llvm.x86.avx2.pslli.q
define <4 x i64> @test_mm256_slli_si256(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_slli_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2200,7 +2200,7 @@ define <4 x i64> @test_mm256_slli_si256(
define <2 x i64> @test_mm_sllv_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_sllv_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2213,7 +2213,7 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d
define <4 x i64> @test_mm256_sllv_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sllv_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2226,7 +2226,7 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d
define <2 x i64> @test_mm_sllv_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_sllv_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1)
@@ -2236,7 +2236,7 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q
define <4 x i64> @test_mm256_sllv_epi64(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_sllv_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -2246,7 +2246,7 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q
define <4 x i64> @test_mm256_sra_epi16(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_sra_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2259,7 +2259,7 @@ declare <16 x i16> @llvm.x86.avx2.psra.w
define <4 x i64> @test_mm256_sra_epi32(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_sra_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2272,7 +2272,7 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(
define <4 x i64> @test_mm256_srai_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srai_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsraw $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2284,7 +2284,7 @@ declare <16 x i16> @llvm.x86.avx2.psrai.
define <4 x i64> @test_mm256_srai_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srai_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrad $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2296,7 +2296,7 @@ declare <8 x i32> @llvm.x86.avx2.psrai.d
define <2 x i64> @test_mm_srav_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_srav_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2309,7 +2309,7 @@ declare <4 x i32> @llvm.x86.avx2.psrav.d
define <4 x i64> @test_mm256_srav_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_srav_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2322,7 +2322,7 @@ declare <8 x i32> @llvm.x86.avx2.psrav.d
define <4 x i64> @test_mm256_srl_epi16(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_srl_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2335,7 +2335,7 @@ declare <16 x i16> @llvm.x86.avx2.psrl.w
define <4 x i64> @test_mm256_srl_epi32(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_srl_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2348,7 +2348,7 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(
define <4 x i64> @test_mm256_srl_epi64(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm256_srl_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1)
@@ -2358,7 +2358,7 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(
define <4 x i64> @test_mm256_srli_epi16(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srli_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2370,7 +2370,7 @@ declare <16 x i16> @llvm.x86.avx2.psrli.
define <4 x i64> @test_mm256_srli_epi32(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srli_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2382,7 +2382,7 @@ declare <8 x i32> @llvm.x86.avx2.psrli.d
define <4 x i64> @test_mm256_srli_epi64(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srli_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %a0, i32 3)
@@ -2392,7 +2392,7 @@ declare <4 x i64> @llvm.x86.avx2.psrli.q
define <4 x i64> @test_mm256_srli_si256(<4 x i64> %a0) {
; CHECK-LABEL: test_mm256_srli_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2403,7 +2403,7 @@ define <4 x i64> @test_mm256_srli_si256(
define <2 x i64> @test_mm_srlv_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_srlv_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
@@ -2416,7 +2416,7 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d
define <4 x i64> @test_mm256_srlv_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_srlv_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2429,7 +2429,7 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d
define <2 x i64> @test_mm_srlv_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_mm_srlv_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1)
@@ -2439,7 +2439,7 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q
define <4 x i64> @test_mm256_srlv_epi64(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_srlv_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1)
@@ -2449,13 +2449,13 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q
define <4 x i64> @test_mm256_stream_load_si256(<4 x i64> *%a0) {
; X86-LABEL: test_mm256_stream_load_si256:
-; X86: # BB#0:
+; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovntdqa (%eax), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_mm256_stream_load_si256:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovntdqa (%rdi), %ymm0
; X64-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> *%a0 to i8*
@@ -2466,7 +2466,7 @@ declare <4 x i64> @llvm.x86.avx2.movntdq
define <4 x i64> @test_mm256_sub_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_sub_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2478,7 +2478,7 @@ define <4 x i64> @test_mm256_sub_epi8(<4
define <4 x i64> @test_mm256_sub_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_sub_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2490,7 +2490,7 @@ define <4 x i64> @test_mm256_sub_epi16(<
define <4 x i64> @test_mm256_sub_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_sub_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2502,7 +2502,7 @@ define <4 x i64> @test_mm256_sub_epi32(<
define <4 x i64> @test_mm256_sub_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_sub_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = sub <4 x i64> %a0, %a1
@@ -2511,7 +2511,7 @@ define <4 x i64> @test_mm256_sub_epi64(<
define <4 x i64> @test_mm256_subs_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_subs_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2524,7 +2524,7 @@ declare <32 x i8> @llvm.x86.avx2.psubs.b
define <4 x i64> @test_mm256_subs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_subs_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2537,7 +2537,7 @@ declare <16 x i16> @llvm.x86.avx2.psubs.
define <4 x i64> @test_mm256_subs_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_subs_epu8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubusb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2550,7 +2550,7 @@ declare <32 x i8> @llvm.x86.avx2.psubus.
define <4 x i64> @test_mm256_subs_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_mm256_subs_epu16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubusw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2563,7 +2563,7 @@ declare <16 x i16> @llvm.x86.avx2.psubus
define <4 x i64> @test_mm256_unpackhi_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpackhi_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2575,7 +2575,7 @@ define <4 x i64> @test_mm256_unpackhi_ep
define <4 x i64> @test_mm256_unpackhi_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpackhi_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2587,7 +2587,7 @@ define <4 x i64> @test_mm256_unpackhi_ep
define <4 x i64> @test_mm256_unpackhi_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpackhi_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2599,7 +2599,7 @@ define <4 x i64> @test_mm256_unpackhi_ep
define <4 x i64> @test_mm256_unpackhi_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpackhi_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -2608,7 +2608,7 @@ define <4 x i64> @test_mm256_unpackhi_ep
define <4 x i64> @test_mm256_unpacklo_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpacklo_epi8:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
@@ -2620,7 +2620,7 @@ define <4 x i64> @test_mm256_unpacklo_ep
define <4 x i64> @test_mm256_unpacklo_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpacklo_epi16:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
@@ -2632,7 +2632,7 @@ define <4 x i64> @test_mm256_unpacklo_ep
define <4 x i64> @test_mm256_unpacklo_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpacklo_epi32:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; CHECK-NEXT: ret{{[l|q]}}
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
@@ -2644,7 +2644,7 @@ define <4 x i64> @test_mm256_unpacklo_ep
define <4 x i64> @test_mm256_unpacklo_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_unpacklo_epi64:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -2653,7 +2653,7 @@ define <4 x i64> @test_mm256_unpacklo_ep
define <4 x i64> @test_mm256_xor_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; CHECK-LABEL: test_mm256_xor_si256:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = xor <4 x i64> %a0, %a1
Modified: llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 7) ; <<16 x i16>> [#uses=1]
@@ -17,7 +17,7 @@ declare <16 x i16> @llvm.x86.avx2.pblend
define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 7) ; <<4 x i32>> [#uses=1]
@@ -28,7 +28,7 @@ declare <4 x i32> @llvm.x86.avx2.pblendd
define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 7) ; <<8 x i32>> [#uses=1]
@@ -39,13 +39,13 @@ declare <8 x i32> @llvm.x86.avx2.pblendd
define <4 x i64> @test_x86_avx2_movntdqa(i8* %a0) {
; X86-LABEL: test_x86_avx2_movntdqa:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovntdqa (%eax), %ymm0
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_avx2_movntdqa:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovntdqa (%rdi), %ymm0
; X64-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.movntdqa(i8* %a0) ; <<4 x i64>> [#uses=1]
@@ -56,7 +56,7 @@ declare <4 x i64> @llvm.x86.avx2.movntdq
define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx2_mpsadbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i32 7) ; <<16 x i16>> [#uses=1]
@@ -67,7 +67,7 @@ declare <16 x i16> @llvm.x86.avx2.mpsadb
define <4 x i64> @test_x86_avx2_psll_dq_bs(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_psll_dq_bs:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8],zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psll.dq.bs(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -78,7 +78,7 @@ declare <4 x i64> @llvm.x86.avx2.psll.dq
define <4 x i64> @test_x86_avx2_psrl_dq_bs(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_psrl_dq_bs:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,ymm0[23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -89,7 +89,7 @@ declare <4 x i64> @llvm.x86.avx2.psrl.dq
define <4 x i64> @test_x86_avx2_psll_dq(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_psll_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpslldq {{.*#+}} ymm0 = zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psll.dq(<4 x i64> %a0, i32 8) ; <<4 x i64>> [#uses=1]
@@ -100,7 +100,7 @@ declare <4 x i64> @llvm.x86.avx2.psll.dq
define <4 x i64> @test_x86_avx2_psrl_dq(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_psrl_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64> %a0, i32 8) ; <<4 x i64>> [#uses=1]
@@ -111,7 +111,7 @@ declare <4 x i64> @llvm.x86.avx2.psrl.dq
define <2 x i64> @test_x86_avx2_vextracti128(<4 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_vextracti128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
@@ -123,7 +123,7 @@ declare <2 x i64> @llvm.x86.avx2.vextrac
define <4 x i64> @test_x86_avx2_vinserti128(<4 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx2_vinserti128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.vinserti128(<4 x i64> %a0, <2 x i64> %a1, i8 7)
@@ -134,7 +134,7 @@ declare <4 x i64> @llvm.x86.avx2.vinsert
define <4 x double> @test_x86_avx2_vbroadcast_sd_pd_256(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx2_vbroadcast_sd_pd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x double> @llvm.x86.avx2.vbroadcast.sd.pd.256(<2 x double> %a0)
@@ -145,7 +145,7 @@ declare <4 x double> @llvm.x86.avx2.vbro
define <4 x float> @test_x86_avx2_vbroadcast_ss_ps(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx2_vbroadcast_ss_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.x86.avx2.vbroadcast.ss.ps(<4 x float> %a0)
@@ -156,7 +156,7 @@ declare <4 x float> @llvm.x86.avx2.vbroa
define <8 x float> @test_x86_avx2_vbroadcast_ss_ps_256(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx2_vbroadcast_ss_ps_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x float> @llvm.x86.avx2.vbroadcast.ss.ps.256(<4 x float> %a0)
@@ -167,7 +167,7 @@ declare <8 x float> @llvm.x86.avx2.vbroa
define <16 x i8> @test_x86_avx2_pbroadcastb_128(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i8> @llvm.x86.avx2.pbroadcastb.128(<16 x i8> %a0)
@@ -178,7 +178,7 @@ declare <16 x i8> @llvm.x86.avx2.pbroadc
define <32 x i8> @test_x86_avx2_pbroadcastb_256(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pbroadcastb.256(<16 x i8> %a0)
@@ -189,7 +189,7 @@ declare <32 x i8> @llvm.x86.avx2.pbroadc
define <8 x i16> @test_x86_avx2_pbroadcastw_128(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastw_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i16> @llvm.x86.avx2.pbroadcastw.128(<8 x i16> %a0)
@@ -200,7 +200,7 @@ declare <8 x i16> @llvm.x86.avx2.pbroadc
define <16 x i16> @test_x86_avx2_pbroadcastw_256(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastw_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pbroadcastw.256(<8 x i16> %a0)
@@ -211,7 +211,7 @@ declare <16 x i16> @llvm.x86.avx2.pbroad
define <4 x i32> @test_x86_avx2_pbroadcastd_128(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i32> @llvm.x86.avx2.pbroadcastd.128(<4 x i32> %a0)
@@ -222,7 +222,7 @@ declare <4 x i32> @llvm.x86.avx2.pbroadc
define <8 x i32> @test_x86_avx2_pbroadcastd_256(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pbroadcastd.256(<4 x i32> %a0)
@@ -233,7 +233,7 @@ declare <8 x i32> @llvm.x86.avx2.pbroadc
define <2 x i64> @test_x86_avx2_pbroadcastq_128(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastq_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x i64> @llvm.x86.avx2.pbroadcastq.128(<2 x i64> %a0)
@@ -244,7 +244,7 @@ declare <2 x i64> @llvm.x86.avx2.pbroadc
define <4 x i64> @test_x86_avx2_pbroadcastq_256(<2 x i64> %a0) {
; CHECK-LABEL: test_x86_avx2_pbroadcastq_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pbroadcastq.256(<2 x i64> %a0)
@@ -255,7 +255,7 @@ declare <4 x i64> @llvm.x86.avx2.pbroadc
define <8 x i32> @test_x86_avx2_pmovsxbd(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxbd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8> %a0) ; <<8 x i32>> [#uses=1]
@@ -266,7 +266,7 @@ declare <8 x i32> @llvm.x86.avx2.pmovsxb
define <4 x i64> @test_x86_avx2_pmovsxbq(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxbq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8> %a0) ; <<4 x i64>> [#uses=1]
@@ -277,7 +277,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovsxb
define <16 x i16> @test_x86_avx2_pmovsxbw(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbw %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8> %a0) ; <<16 x i16>> [#uses=1]
@@ -288,7 +288,7 @@ declare <16 x i16> @llvm.x86.avx2.pmovsx
define <4 x i64> @test_x86_avx2_pmovsxdq(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxdq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxdq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32> %a0) ; <<4 x i64>> [#uses=1]
@@ -299,7 +299,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovsxd
define <8 x i32> @test_x86_avx2_pmovsxwd(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxwd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwd %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16> %a0) ; <<8 x i32>> [#uses=1]
@@ -310,7 +310,7 @@ declare <8 x i32> @llvm.x86.avx2.pmovsxw
define <4 x i64> @test_x86_avx2_pmovsxwq(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovsxwq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwq %xmm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16> %a0) ; <<4 x i64>> [#uses=1]
@@ -321,7 +321,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovsxw
define <8 x i32> @test_x86_avx2_pmovzxbd(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxbd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8> %a0) ; <<8 x i32>> [#uses=1]
@@ -332,7 +332,7 @@ declare <8 x i32> @llvm.x86.avx2.pmovzxb
define <4 x i64> @test_x86_avx2_pmovzxbq(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxbq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8> %a0) ; <<4 x i64>> [#uses=1]
@@ -343,7 +343,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovzxb
define <16 x i16> @test_x86_avx2_pmovzxbw(<16 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8> %a0) ; <<16 x i16>> [#uses=1]
@@ -354,7 +354,7 @@ declare <16 x i16> @llvm.x86.avx2.pmovzx
define <4 x i64> @test_x86_avx2_pmovzxdq(<4 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxdq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32> %a0) ; <<4 x i64>> [#uses=1]
@@ -365,7 +365,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovzxd
define <8 x i32> @test_x86_avx2_pmovzxwd(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxwd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16> %a0) ; <<8 x i32>> [#uses=1]
@@ -376,7 +376,7 @@ declare <8 x i32> @llvm.x86.avx2.pmovzxw
define <4 x i64> @test_x86_avx2_pmovzxwq(<8 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovzxwq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16> %a0) ; <<4 x i64>> [#uses=1]
@@ -388,7 +388,7 @@ declare <4 x i64> @llvm.x86.avx2.pmovzxw
define void @test_x86_avx_storeu_dq_256(i8* %a0, <32 x i8> %a1) {
; The add operation forces the execution domain.
; X86-LABEL: test_x86_avx_storeu_dq_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X86-NEXT: vpsubb %ymm1, %ymm0, %ymm0
@@ -397,7 +397,7 @@ define void @test_x86_avx_storeu_dq_256(
; X86-NEXT: ret{{[l|q]}}
;
; X64-LABEL: test_x86_avx_storeu_dq_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X64-NEXT: vmovdqu %ymm0, (%rdi)
@@ -411,7 +411,7 @@ declare void @llvm.x86.avx.storeu.dq.256
define <32 x i8> @mm256_max_epi8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: mm256_max_epi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1)
@@ -421,7 +421,7 @@ declare <32 x i8> @llvm.x86.avx2.pmaxs.b
define <16 x i16> @mm256_max_epi16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: mm256_max_epi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -431,7 +431,7 @@ declare <16 x i16> @llvm.x86.avx2.pmaxs.
define <8 x i32> @mm256_max_epi32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: mm256_max_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -441,7 +441,7 @@ declare <8 x i32> @llvm.x86.avx2.pmaxs.d
define <32 x i8> @mm256_max_epu8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: mm256_max_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1)
@@ -451,7 +451,7 @@ declare <32 x i8> @llvm.x86.avx2.pmaxu.b
define <16 x i16> @mm256_max_epu16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: mm256_max_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -461,7 +461,7 @@ declare <16 x i16> @llvm.x86.avx2.pmaxu.
define <8 x i32> @mm256_max_epu32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: mm256_max_epu32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -471,7 +471,7 @@ declare <8 x i32> @llvm.x86.avx2.pmaxu.d
define <32 x i8> @mm256_min_epi8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: mm256_min_epi8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1)
@@ -481,7 +481,7 @@ declare <32 x i8> @llvm.x86.avx2.pmins.b
define <16 x i16> @mm256_min_epi16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: mm256_min_epi16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -491,7 +491,7 @@ declare <16 x i16> @llvm.x86.avx2.pmins.
define <8 x i32> @mm256_min_epi32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: mm256_min_epi32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -501,7 +501,7 @@ declare <8 x i32> @llvm.x86.avx2.pmins.d
define <32 x i8> @mm256_min_epu8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: mm256_min_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminub %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1)
@@ -511,7 +511,7 @@ declare <32 x i8> @llvm.x86.avx2.pminu.b
define <16 x i16> @mm256_min_epu16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: mm256_min_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminuw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1)
@@ -521,7 +521,7 @@ declare <16 x i16> @llvm.x86.avx2.pminu.
define <8 x i32> @mm256_min_epu32(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: mm256_min_epu32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpminud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1)
@@ -531,7 +531,7 @@ declare <8 x i32> @llvm.x86.avx2.pminu.d
define <32 x i8> @mm256_avg_epu8(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: mm256_avg_epu8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpavgb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -541,7 +541,7 @@ declare <32 x i8> @llvm.x86.avx2.pavg.b(
define <16 x i16> @mm256_avg_epu16(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: mm256_avg_epu16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpavgw %ymm1, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -551,7 +551,7 @@ declare <16 x i16> @llvm.x86.avx2.pavg.w
define <32 x i8> @test_x86_avx2_pabs_b(<32 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pabs_b:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsb %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0) ; <<32 x i8>> [#uses=1]
@@ -561,7 +561,7 @@ declare <32 x i8> @llvm.x86.avx2.pabs.b(
define <8 x i32> @test_x86_avx2_pabs_d(<8 x i32> %a0) {
; CHECK-LABEL: test_x86_avx2_pabs_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsd %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0) ; <<8 x i32>> [#uses=1]
@@ -572,7 +572,7 @@ declare <8 x i32> @llvm.x86.avx2.pabs.d(
define <16 x i16> @test_x86_avx2_pabs_w(<16 x i16> %a0) {
; CHECK-LABEL: test_x86_avx2_pabs_w:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsw %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0) ; <<16 x i16>> [#uses=1]
@@ -583,7 +583,7 @@ declare <16 x i16> @llvm.x86.avx2.pabs.w
define <4 x i64> @test_x86_avx2_vperm2i128(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_x86_avx2_vperm2i128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x i64> @llvm.x86.avx2.vperm2i128(<4 x i64> %a0, <4 x i64> %a1, i8 1) ; <<4 x i64>> [#uses=1]
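
The test updates above are purely mechanical: every old-style block comment of the form "## BB#0:" (or "# BB#0:") in a FileCheck line becomes "## %bb.0:". A minimal standalone sketch of that textual rewrite follows; this is a hypothetical illustration only, since in-tree update scripts such as utils/update_llc_test_checks.py can regenerate these CHECK lines from scratch.

#!/usr/bin/env python3
# Hypothetical helper (not part of this commit): rewrite old-style
# basic-block comments in FileCheck lines from "BB#N:" to "%bb.N:".
import re
import sys

# Match "# BB#0:" or "## BB#0:", capturing the comment marker and block number.
BB_REF = re.compile(r'(#+ )BB#(\d+):')

def rewrite(text):
    # "## BB#0:" -> "## %bb.0:", keeping the "#"/"##" comment marker intact.
    return BB_REF.sub(r'\1%bb.\2:', text)

if __name__ == '__main__':
    for path in sys.argv[1:]:
        with open(path) as f:
            src = f.read()
        with open(path, 'w') as f:
            f.write(rewrite(src))
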
Modified: llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86.ll Mon Dec 4 09:18:51 2017
@@ -6,12 +6,12 @@
define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_packssdw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x6b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packssdw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
@@ -22,28 +22,28 @@ declare <16 x i16> @llvm.x86.avx2.packss
define <16 x i16> @test_x86_avx2_packssdw_fold() {
; X86-AVX-LABEL: test_x86_avx2_packssdw_fold:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI1_0, kind: FK_Data_4
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI1_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI1_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packssdw_fold:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI1_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI1_0-4, kind: reloc_riprel_4byte
@@ -55,12 +55,12 @@ define <16 x i16> @test_x86_avx2_packssd
define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_packsswb:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x63,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packsswb:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x63,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
@@ -71,28 +71,28 @@ declare <32 x i8> @llvm.x86.avx2.packssw
define <32 x i8> @test_x86_avx2_packsswb_fold() {
; X86-AVX-LABEL: test_x86_avx2_packsswb_fold:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI3_0, kind: FK_Data_4
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI3_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packsswb_fold:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI3_0-4, kind: reloc_riprel_4byte
@@ -104,12 +104,12 @@ define <32 x i8> @test_x86_avx2_packsswb
define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_packuswb:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x67,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packuswb:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x67,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
@@ -120,28 +120,28 @@ declare <32 x i8> @llvm.x86.avx2.packusw
define <32 x i8> @test_x86_avx2_packuswb_fold() {
; X86-AVX-LABEL: test_x86_avx2_packuswb_fold:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI5_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI5_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packuswb_fold:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI5_0-4, kind: reloc_riprel_4byte
@@ -153,12 +153,12 @@ define <32 x i8> @test_x86_avx2_packuswb
define <32 x i8> @test_x86_avx2_padds_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_padds_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xec,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_padds_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -169,12 +169,12 @@ declare <32 x i8> @llvm.x86.avx2.padds.b
define <16 x i16> @test_x86_avx2_padds_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_padds_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xed,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_padds_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -185,12 +185,12 @@ declare <16 x i16> @llvm.x86.avx2.padds.
define <32 x i8> @test_x86_avx2_paddus_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_paddus_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xdc,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_paddus_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdc,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -201,12 +201,12 @@ declare <32 x i8> @llvm.x86.avx2.paddus.
define <16 x i16> @test_x86_avx2_paddus_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_paddus_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xdd,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_paddus_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xdd,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -217,12 +217,12 @@ declare <16 x i16> @llvm.x86.avx2.paddus
define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmadd_wd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf5,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmadd_wd:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf5,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1) ; <<8 x i32>> [#uses=1]
@@ -233,12 +233,12 @@ declare <8 x i32> @llvm.x86.avx2.pmadd.w
define <16 x i16> @test_x86_avx2_pmaxs_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxs_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xee,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxs_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xee,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -249,12 +249,12 @@ declare <16 x i16> @llvm.x86.avx2.pmaxs.
define <32 x i8> @test_x86_avx2_pmaxu_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxu_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xde,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxu_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xde,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -265,12 +265,12 @@ declare <32 x i8> @llvm.x86.avx2.pmaxu.b
define <16 x i16> @test_x86_avx2_pmins_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmins_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xea,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmins_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xea,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -281,12 +281,12 @@ declare <16 x i16> @llvm.x86.avx2.pmins.
define <32 x i8> @test_x86_avx2_pminu_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pminu_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xda,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminu_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminub %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xda,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -297,7 +297,7 @@ declare <32 x i8> @llvm.x86.avx2.pminu.b
define i32 @test_x86_avx2_pmovmskb(<32 x i8> %a0) {
; CHECK-LABEL: test_x86_avx2_pmovmskb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovmskb %ymm0, %eax ## encoding: [0xc5,0xfd,0xd7,0xc0]
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -309,12 +309,12 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32
define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmulh_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe5,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmulh_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe5,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -325,12 +325,12 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.
define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmulhu_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe4,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmulhu_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe4,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -341,12 +341,12 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu
define <4 x i64> @test_x86_avx2_pmulu_dq(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pmulu_dq:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf4,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmulu_dq:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf4,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1) ; <<4 x i64>> [#uses=1]
@@ -357,12 +357,12 @@ declare <4 x i64> @llvm.x86.avx2.pmulu.d
define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_psad_bw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf6,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psad_bw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf6,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1) ; <<4 x i64>> [#uses=1]
@@ -373,12 +373,12 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw
define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psll_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf2,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psll_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpslld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf2,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -389,12 +389,12 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(
define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psll_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf3,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psll_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf3,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -405,12 +405,12 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(
define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psll_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xf1,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psll_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xf1,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -421,12 +421,12 @@ declare <16 x i16> @llvm.x86.avx2.psll.w
define <8 x i32> @test_x86_avx2_pslli_d(<8 x i32> %a0) {
; AVX2-LABEL: test_x86_avx2_pslli_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpslld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xf0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pslli_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpslld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xf0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
@@ -437,12 +437,12 @@ declare <8 x i32> @llvm.x86.avx2.pslli.d
define <4 x i64> @test_x86_avx2_pslli_q(<4 x i64> %a0) {
; AVX2-LABEL: test_x86_avx2_pslli_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xf0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pslli_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xf0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -453,12 +453,12 @@ declare <4 x i64> @llvm.x86.avx2.pslli.q
define <16 x i16> @test_x86_avx2_pslli_w(<16 x i16> %a0) {
; AVX2-LABEL: test_x86_avx2_pslli_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xf0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pslli_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xf0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
@@ -469,12 +469,12 @@ declare <16 x i16> @llvm.x86.avx2.pslli.
define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psra_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe2,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psra_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe2,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -485,12 +485,12 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(
define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psra_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe1,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psra_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe1,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -501,12 +501,12 @@ declare <16 x i16> @llvm.x86.avx2.psra.w
define <8 x i32> @test_x86_avx2_psrai_d(<8 x i32> %a0) {
; AVX2-LABEL: test_x86_avx2_psrai_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrad $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xe0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrai_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrad $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xe0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
@@ -517,12 +517,12 @@ declare <8 x i32> @llvm.x86.avx2.psrai.d
define <16 x i16> @test_x86_avx2_psrai_w(<16 x i16> %a0) {
; AVX2-LABEL: test_x86_avx2_psrai_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsraw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xe0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrai_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsraw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xe0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
@@ -533,12 +533,12 @@ declare <16 x i16> @llvm.x86.avx2.psrai.
define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrl_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd2,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrl_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -549,12 +549,12 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(
define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psrl_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd3,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrl_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -565,12 +565,12 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(
define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psrl_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrl_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -581,12 +581,12 @@ declare <16 x i16> @llvm.x86.avx2.psrl.w
define <8 x i32> @test_x86_avx2_psrli_d(<8 x i32> %a0) {
; AVX2-LABEL: test_x86_avx2_psrli_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrld $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x72,0xd0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrli_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrld $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x72,0xd0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
@@ -597,12 +597,12 @@ declare <8 x i32> @llvm.x86.avx2.psrli.d
define <4 x i64> @test_x86_avx2_psrli_q(<4 x i64> %a0) {
; AVX2-LABEL: test_x86_avx2_psrli_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x73,0xd0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrli_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlq $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x73,0xd0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -613,12 +613,12 @@ declare <4 x i64> @llvm.x86.avx2.psrli.q
define <16 x i16> @test_x86_avx2_psrli_w(<16 x i16> %a0) {
; AVX2-LABEL: test_x86_avx2_psrli_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0x71,0xd0,0x07]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrli_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x71,0xd0,0x07]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
@@ -629,12 +629,12 @@ declare <16 x i16> @llvm.x86.avx2.psrli.
define <32 x i8> @test_x86_avx2_psubs_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_psubs_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe8,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psubs_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -645,12 +645,12 @@ declare <32 x i8> @llvm.x86.avx2.psubs.b
define <16 x i16> @test_x86_avx2_psubs_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psubs_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xe9,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psubs_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -661,12 +661,12 @@ declare <16 x i16> @llvm.x86.avx2.psubs.
define <32 x i8> @test_x86_avx2_psubus_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_psubus_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd8,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psubus_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd8,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -677,12 +677,12 @@ declare <32 x i8> @llvm.x86.avx2.psubus.
define <16 x i16> @test_x86_avx2_psubus_w(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_psubus_w:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd9,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psubus_w:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd9,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -692,7 +692,7 @@ declare <16 x i16> @llvm.x86.avx2.psubus
define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_phadd_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphaddd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x02,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -703,7 +703,7 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d
define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phadd_sw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x03,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -714,7 +714,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.
define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phadd_w:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphaddw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x01,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -725,7 +725,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.
define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_phsub_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphsubd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x06,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -736,7 +736,7 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d
define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phsub_sw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x07,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -747,7 +747,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.
define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_phsub_w:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vphsubw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x05,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -758,12 +758,12 @@ declare <16 x i16> @llvm.x86.avx2.phsub.
define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pmadd_ub_sw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x04,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i16>> [#uses=1]
@@ -774,27 +774,27 @@ declare <16 x i16> @llvm.x86.avx2.pmadd.
; Make sure we don't commute this operation.
define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(<32 x i8>* %ptr, <32 x i8> %a1) {
; X86-AVX-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vmovdqa (%eax), %ymm1 ## encoding: [0xc5,0xfd,0x6f,0x08]
; X86-AVX-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vmovdqa (%eax), %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x08]
; X86-AVX512VL-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa (%rdi), %ymm1 ## encoding: [0xc5,0xfd,0x6f,0x0f]
; X64-AVX-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_pmadd_ub_sw_load_op0:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa (%rdi), %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0x0f]
; X64-AVX512VL-NEXT: vpmaddubsw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x04,0xc0]
; X64-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -805,12 +805,12 @@ define <16 x i16> @test_x86_avx2_pmadd_u
define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmul_hr_sw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmul_hr_sw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x0b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -821,12 +821,12 @@ declare <16 x i16> @llvm.x86.avx2.pmul.h
define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pshuf_b:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x00,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pshuf_b:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x00,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -837,7 +837,7 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b
define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx2_psign_b:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsignb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x08,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -848,7 +848,7 @@ declare <32 x i8> @llvm.x86.avx2.psign.b
define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_psign_d:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsignd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x0a,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -859,7 +859,7 @@ declare <8 x i32> @llvm.x86.avx2.psign.d
define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_psign_w:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpsignw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x09,0xc1]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -870,7 +870,7 @@ declare <16 x i16> @llvm.x86.avx2.psign.
define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
; CHECK-LABEL: test_x86_avx2_mpsadbw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x42,0xc1,0x07]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7) ; <<16 x i16>> [#uses=1]
@@ -881,12 +881,12 @@ declare <16 x i16> @llvm.x86.avx2.mpsadb
define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_packusdw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_packusdw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x2b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
@@ -897,28 +897,28 @@ declare <16 x i16> @llvm.x86.avx2.packus
define <16 x i16> @test_x86_avx2_packusdw_fold() {
; X86-AVX-LABEL: test_x86_avx2_packusdw_fold:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI55_0, kind: FK_Data_4
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovaps LCPI55_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI55_0, kind: FK_Data_4
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packusdw_fold:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI55_0-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI55_0-4, kind: reloc_riprel_4byte
@@ -930,7 +930,7 @@ define <16 x i16> @test_x86_avx2_packusd
define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: test_x86_avx2_pblendvb:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x4c,0xc1,0x20]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) ; <<32 x i8>> [#uses=1]
@@ -941,7 +941,7 @@ declare <32 x i8> @llvm.x86.avx2.pblendv
define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendw:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpblendw $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0e,0xc1,0x07]
; CHECK-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -953,12 +953,12 @@ declare <16 x i16> @llvm.x86.avx2.pblend
define <32 x i8> @test_x86_avx2_pmaxsb(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxsb:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3c,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxsb:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3c,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -969,12 +969,12 @@ declare <32 x i8> @llvm.x86.avx2.pmaxs.b
define <8 x i32> @test_x86_avx2_pmaxsd(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxsd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3d,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxsd:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3d,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -985,12 +985,12 @@ declare <8 x i32> @llvm.x86.avx2.pmaxs.d
define <8 x i32> @test_x86_avx2_pmaxud(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxud:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3f,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxud:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3f,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1001,12 +1001,12 @@ declare <8 x i32> @llvm.x86.avx2.pmaxu.d
define <16 x i16> @test_x86_avx2_pmaxuw(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pmaxuw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3e,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pmaxuw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3e,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -1017,12 +1017,12 @@ declare <16 x i16> @llvm.x86.avx2.pmaxu.
define <32 x i8> @test_x86_avx2_pminsb(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-LABEL: test_x86_avx2_pminsb:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x38,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminsb:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x38,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -1033,12 +1033,12 @@ declare <32 x i8> @llvm.x86.avx2.pmins.b
define <8 x i32> @test_x86_avx2_pminsd(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pminsd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x39,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminsd:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x39,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1049,12 +1049,12 @@ declare <8 x i32> @llvm.x86.avx2.pmins.d
define <8 x i32> @test_x86_avx2_pminud(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_pminud:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3b,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminud:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminud %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3b,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1065,12 +1065,12 @@ declare <8 x i32> @llvm.x86.avx2.pminu.d
define <16 x i16> @test_x86_avx2_pminuw(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-LABEL: test_x86_avx2_pminuw:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x3a,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_pminuw:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x3a,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -1088,7 +1088,7 @@ declare <4 x i64> @llvm.x86.avx2.pmul.dq
define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendd_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vblendps $8, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08]
; CHECK-NEXT: ## xmm0 = xmm1[0,1,2],xmm0[3]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1100,7 +1100,7 @@ declare <4 x i32> @llvm.x86.avx2.pblendd
define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
; CHECK-LABEL: test_x86_avx2_pblendd_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vblendps $7, %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x0c,0xc1,0x07]
; CHECK-NEXT: ## ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1115,12 +1115,12 @@ declare <8 x i32> @llvm.x86.avx2.pblendd
; the instruction.
define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_permd:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_permd:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1134,12 +1134,12 @@ declare <8 x i32> @llvm.x86.avx2.permd(<
; the instruction.
define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_permps:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_permps:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpermps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x16,0xc0]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1]
@@ -1150,13 +1150,13 @@ declare <8 x float> @llvm.x86.avx2.permp
define <2 x i64> @test_x86_avx2_maskload_q(i8* %a0, <2 x i64> %a1) {
; X86-LABEL: test_x86_avx2_maskload_q:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x8c,0x00]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_q:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x8c,0x07]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1167,13 +1167,13 @@ declare <2 x i64> @llvm.x86.avx2.maskloa
define <4 x i64> @test_x86_avx2_maskload_q_256(i8* %a0, <4 x i64> %a1) {
; X86-LABEL: test_x86_avx2_maskload_q_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x8c,0x00]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_q_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x8c,0x07]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -1184,13 +1184,13 @@ declare <4 x i64> @llvm.x86.avx2.maskloa
define <4 x i32> @test_x86_avx2_maskload_d(i8* %a0, <4 x i32> %a1) {
; X86-LABEL: test_x86_avx2_maskload_d:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x8c,0x00]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_d:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x8c,0x07]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1201,13 +1201,13 @@ declare <4 x i32> @llvm.x86.avx2.maskloa
define <8 x i32> @test_x86_avx2_maskload_d_256(i8* %a0, <8 x i32> %a1) {
; X86-LABEL: test_x86_avx2_maskload_d_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd (%eax), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x8c,0x00]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskload_d_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x8c,0x07]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1218,13 +1218,13 @@ declare <8 x i32> @llvm.x86.avx2.maskloa
define void @test_x86_avx2_maskstore_q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_q:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0xf9,0x8e,0x08]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_q:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) ## encoding: [0xc4,0xe2,0xf9,0x8e,0x0f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
call void @llvm.x86.avx2.maskstore.q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2)
@@ -1235,14 +1235,14 @@ declare void @llvm.x86.avx2.maskstore.q(
define void @test_x86_avx2_maskstore_q_256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_q_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovq %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x08]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_q_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) ## encoding: [0xc4,0xe2,0xfd,0x8e,0x0f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1254,13 +1254,13 @@ declare void @llvm.x86.avx2.maskstore.q.
define void @test_x86_avx2_maskstore_d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_d:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd %xmm1, %xmm0, (%eax) ## encoding: [0xc4,0xe2,0x79,0x8e,0x08]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_d:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) ## encoding: [0xc4,0xe2,0x79,0x8e,0x0f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
call void @llvm.x86.avx2.maskstore.d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2)
@@ -1271,14 +1271,14 @@ declare void @llvm.x86.avx2.maskstore.d(
define void @test_x86_avx2_maskstore_d_256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) {
; X86-LABEL: test_x86_avx2_maskstore_d_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpmaskmovd %ymm1, %ymm0, (%eax) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x08]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_maskstore_d_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) ## encoding: [0xc4,0xe2,0x7d,0x8e,0x0f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1290,12 +1290,12 @@ declare void @llvm.x86.avx2.maskstore.d.
define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psllv_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x47,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psllv_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1306,12 +1306,12 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d
define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psllv_d_256:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x47,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psllv_d_256:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1322,12 +1322,12 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d
define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psllv_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psllv_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1338,12 +1338,12 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q
define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psllv_q_256:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psllv_q_256:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -1354,12 +1354,12 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q
define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrlv_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x45,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrlv_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1370,12 +1370,12 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d
define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrlv_d_256:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrlv_d_256:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1386,12 +1386,12 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d
define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psrlv_q:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrlv_q:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1402,12 +1402,12 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q
define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; AVX2-LABEL: test_x86_avx2_psrlv_q_256:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrlv_q_256:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -1418,12 +1418,12 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q
define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrav_d:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrav_d:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1432,7 +1432,7 @@ define <4 x i32> @test_x86_avx2_psrav_d(
define <4 x i32> @test_x86_avx2_psrav_d_const(<4 x i32> %a0, <4 x i32> %a1) {
; X86-AVX-LABEL: test_x86_avx2_psrav_d_const:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; X86-AVX-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI88_0, kind: FK_Data_4
@@ -1441,7 +1441,7 @@ define <4 x i32> @test_x86_avx2_psrav_d_
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovdqa LCPI88_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI88_0, kind: FK_Data_4
@@ -1450,7 +1450,7 @@ define <4 x i32> @test_x86_avx2_psrav_d_
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrav_d_const:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; X64-AVX-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI88_0-4, kind: reloc_riprel_4byte
@@ -1459,7 +1459,7 @@ define <4 x i32> @test_x86_avx2_psrav_d_
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI88_0-4, kind: reloc_riprel_4byte
@@ -1473,12 +1473,12 @@ declare <4 x i32> @llvm.x86.avx2.psrav.d
define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; AVX2-LABEL: test_x86_avx2_psrav_d_256:
-; AVX2: ## BB#0:
+; AVX2: ## %bb.0:
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
; AVX2-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_avx2_psrav_d_256:
-; AVX512VL: ## BB#0:
+; AVX512VL: ## %bb.0:
; AVX512VL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0xc1]
; AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1487,7 +1487,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_
define <8 x i32> @test_x86_avx2_psrav_d_256_const(<8 x i32> %a0, <8 x i32> %a1) {
; X86-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-AVX-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI90_0, kind: FK_Data_4
@@ -1496,7 +1496,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: vmovdqa LCPI90_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI90_0, kind: FK_Data_4
@@ -1505,7 +1505,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-AVX-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI90_0-4, kind: reloc_riprel_4byte
@@ -1514,7 +1514,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI90_0-4, kind: reloc_riprel_4byte
@@ -1528,13 +1528,13 @@ declare <8 x i32> @llvm.x86.avx2.psrav.d
define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, i8* %a1, <4 x i32> %idx, <2 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_pd:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdpd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x92,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0,
@@ -1546,13 +1546,13 @@ declare <2 x double> @llvm.x86.avx2.gath
define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, i8* %a1, <4 x i32> %idx, <4 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_pd_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdpd %ymm2, (%eax,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x92,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x92,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0,
@@ -1564,13 +1564,13 @@ declare <4 x double> @llvm.x86.avx2.gath
define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, i8* %a1, <2 x i64> %idx, <2 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_pd:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqpd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_pd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x93,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0,
@@ -1582,13 +1582,13 @@ declare <2 x double> @llvm.x86.avx2.gath
define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, i8* %a1, <4 x i64> %idx, <4 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_pd_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqpd %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x93,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_pd_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x93,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0,
@@ -1600,13 +1600,13 @@ declare <4 x double> @llvm.x86.avx2.gath
define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, i8* %a1, <4 x i32> %idx, <4 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_ps:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdps %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x92,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x92,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0,
@@ -1618,13 +1618,13 @@ declare <4 x float> @llvm.x86.avx2.gathe
define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, i8* %a1, <8 x i32> %idx, <8 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_ps_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherdps %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x92,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0,
@@ -1636,13 +1636,13 @@ declare <8 x float> @llvm.x86.avx2.gathe
define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, i8* %a1, <2 x i64> %idx, <4 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_ps:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqps %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x93,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_ps:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x93,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0,
@@ -1654,14 +1654,14 @@ declare <4 x float> @llvm.x86.avx2.gathe
define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, i8* %a1, <4 x i64> %idx, <4 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_ps_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x48]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_ps_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x93,0x04,0x4f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1674,13 +1674,13 @@ declare <4 x float> @llvm.x86.avx2.gathe
define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, i8* %a1, <4 x i32> %idx, <2 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_q:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdq %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_q:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x90,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0,
@@ -1692,13 +1692,13 @@ declare <2 x i64> @llvm.x86.avx2.gather.
define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, i8* %a1, <4 x i32> %idx, <4 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_q_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdq %ymm2, (%eax,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x90,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_q_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x90,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0,
@@ -1710,13 +1710,13 @@ declare <4 x i64> @llvm.x86.avx2.gather.
define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, i8* %a1, <2 x i64> %idx, <2 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_q:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqq %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_q:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0xe9,0x91,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0,
@@ -1728,13 +1728,13 @@ declare <2 x i64> @llvm.x86.avx2.gather.
define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, i8* %a1, <4 x i64> %idx, <4 x i64> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_q_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqq %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x91,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_q_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0xed,0x91,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0,
@@ -1746,13 +1746,13 @@ declare <4 x i64> @llvm.x86.avx2.gather.
define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, i8* %a1, <4 x i32> %idx, <4 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_d:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x90,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_d:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x90,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %a0,
@@ -1764,13 +1764,13 @@ declare <4 x i32> @llvm.x86.avx2.gather.
define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, i8* %a1, <8 x i32> %idx, <8 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_d_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherdd %ymm2, (%eax,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_d_d_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 ## encoding: [0xc4,0xe2,0x6d,0x90,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %a0,
@@ -1782,13 +1782,13 @@ declare <8 x i32> @llvm.x86.avx2.gather.
define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, i8* %a1, <2 x i64> %idx, <4 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_d:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqd %xmm2, (%eax,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x91,0x04,0x48]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_d:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x69,0x91,0x04,0x4f]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %a0,
@@ -1800,14 +1800,14 @@ declare <4 x i32> @llvm.x86.avx2.gather.
define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, i8* %a1, <4 x i64> %idx, <4 x i32> %mask) {
; X86-LABEL: test_x86_avx2_gather_q_d_256:
-; X86: ## BB#0:
+; X86: ## %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT: vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x48]
; X86-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X86-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-LABEL: test_x86_avx2_gather_q_d_256:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 ## encoding: [0xc4,0xe2,0x6d,0x91,0x04,0x4f]
; X64-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; X64-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
@@ -1822,7 +1822,7 @@ declare <4 x i32> @llvm.x86.avx2.gather.
define <8 x float> @test_gather_mask(<8 x float> %a0, float* %a, <8 x i32> %idx, <8 x float> %mask, float* nocapture %out) {
;; gather with mask
; X86-AVX-LABEL: test_gather_mask:
-; X86-AVX: ## BB#0:
+; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; X86-AVX-NEXT: vmovaps %ymm2, %ymm3 ## encoding: [0xc5,0xfc,0x28,0xda]
@@ -1831,7 +1831,7 @@ define <8 x float> @test_gather_mask(<8
; X86-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_gather_mask:
-; X86-AVX512VL: ## BB#0:
+; X86-AVX512VL: ## %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
; X86-AVX512VL-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
@@ -1840,14 +1840,14 @@ define <8 x float> @test_gather_mask(<8
; X86-AVX512VL-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_gather_mask:
-; X64-AVX: ## BB#0:
+; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovaps %ymm2, %ymm3 ## encoding: [0xc5,0xfc,0x28,0xda]
; X64-AVX-NEXT: vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
; X64-AVX-NEXT: vmovups %ymm2, (%rsi) ## encoding: [0xc5,0xfc,0x11,0x16]
; X64-AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_gather_mask:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
; X64-AVX512VL-NEXT: vgatherdps %ymm3, (%rdi,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x8f]
; X64-AVX512VL-NEXT: vmovups %ymm2, (%rsi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x16]
Modified: llvm/trunk/test/CodeGen/X86/avx2-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-logic.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-logic.ll Mon Dec 4 09:18:51 2017
@@ -4,14 +4,14 @@
define <4 x i64> @vpandn(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpandn:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm1
; X32-NEXT: vpandn %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpandn:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm1
; X64-NEXT: vpandn %ymm0, %ymm1, %ymm0
@@ -26,14 +26,14 @@ entry:
define <4 x i64> @vpand(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpand:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpand %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpand:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -47,14 +47,14 @@ entry:
define <4 x i64> @vpor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpor:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpor %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpor:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpor %ymm1, %ymm0, %ymm0
@@ -68,14 +68,14 @@ entry:
define <4 x i64> @vpxor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpxor:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpxor:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpxor %ymm1, %ymm0, %ymm0
@@ -89,14 +89,14 @@ entry:
define <32 x i8> @vpblendvb(<32 x i1> %cond, <32 x i8> %x, <32 x i8> %y) {
; X32-LABEL: vpblendvb:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $7, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpblendvb:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $7, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
@@ -107,12 +107,12 @@ define <32 x i8> @vpblendvb(<32 x i1> %c
define <8 x i32> @allOnes() nounwind {
; X32-LABEL: allOnes:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: allOnes:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
ret <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -120,12 +120,12 @@ define <8 x i32> @allOnes() nounwind {
define <16 x i16> @allOnes2() nounwind {
; X32-LABEL: allOnes2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: allOnes2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
ret <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
Modified: llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll Mon Dec 4 09:18:51 2017
@@ -8,7 +8,7 @@ declare <2 x i32> @llvm.masked.gather.v2
define <2 x i32> @masked_gather_v2i32(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i32> %passthro) {
; X86-LABEL: masked_gather_v2i32:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -19,7 +19,7 @@ define <2 x i32> @masked_gather_v2i32(<2
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2i32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovdqa (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -29,20 +29,20 @@ define <2 x i32> @masked_gather_v2i32(<2
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2i32:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB0_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB0_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB0_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: movl (%rax), %eax
; NOGATHER-NEXT: vpinsrq $1, %rax, %xmm2, %xmm2
@@ -58,7 +58,7 @@ entry:
define <4 x i32> @masked_gather_v2i32_concat(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i32> %passthro) {
; X86-LABEL: masked_gather_v2i32_concat:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -69,7 +69,7 @@ define <4 x i32> @masked_gather_v2i32_co
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2i32_concat:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovdqa (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -79,20 +79,20 @@ define <4 x i32> @masked_gather_v2i32_co
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2i32_concat:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB1_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB1_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB1_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: movl (%rax), %eax
; NOGATHER-NEXT: vpinsrq $1, %rax, %xmm2, %xmm2
@@ -112,7 +112,7 @@ declare <2 x float> @llvm.masked.gather.
define <2 x float> @masked_gather_v2float(<2 x float*>* %ptr, <2 x i1> %masks, <2 x float> %passthro) {
; X86-LABEL: masked_gather_v2float:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -122,7 +122,7 @@ define <2 x float> @masked_gather_v2floa
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2float:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovaps (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: vpslld $31, %xmm0, %xmm0
@@ -131,20 +131,20 @@ define <2 x float> @masked_gather_v2floa
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2float:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB2_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB2_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB2_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
; NOGATHER-NEXT: .LBB2_4: # %else2
@@ -160,7 +160,7 @@ entry:
define <4 x float> @masked_gather_v2float_concat(<2 x float*>* %ptr, <2 x i1> %masks, <2 x float> %passthro) {
; X86-LABEL: masked_gather_v2float_concat:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -170,7 +170,7 @@ define <4 x float> @masked_gather_v2floa
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2float_concat:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovaps (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: vpslld $31, %xmm0, %xmm0
@@ -179,20 +179,20 @@ define <4 x float> @masked_gather_v2floa
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2float_concat:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB3_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB3_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB3_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
; NOGATHER-NEXT: .LBB3_4: # %else2
@@ -212,14 +212,14 @@ declare <4 x i32> @llvm.masked.gather.v4
define <4 x i32> @masked_gather_v4i32(<4 x i32*> %ptrs, <4 x i1> %masks, <4 x i32> %passthro) {
; X86-LABEL: masked_gather_v4i32:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpslld $31, %xmm1, %xmm1
; X86-NEXT: vpgatherdd %xmm1, (,%xmm0), %xmm2
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v4i32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpslld $31, %xmm1, %xmm1
; X64-NEXT: vpgatherqd %xmm1, (,%ymm0), %xmm2
; X64-NEXT: vmovdqa %xmm2, %xmm0
@@ -227,26 +227,26 @@ define <4 x i32> @masked_gather_v4i32(<4
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v4i32:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax
; NOGATHER-NEXT: # implicit-def: %xmm3
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm0, %rax
; NOGATHER-NEXT: vmovd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB4_2: # %else
; NOGATHER-NEXT: vpextrb $4, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm0, %rax
; NOGATHER-NEXT: vpinsrd $1, (%rax), %xmm3, %xmm3
; NOGATHER-NEXT: .LBB4_4: # %else2
; NOGATHER-NEXT: vpextrb $8, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm0, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vpinsrd $2, (%rax), %xmm3, %xmm3
@@ -254,7 +254,7 @@ define <4 x i32> @masked_gather_v4i32(<4
; NOGATHER-NEXT: vpextrb $12, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm0, %xmm0
; NOGATHER-NEXT: vpextrq $1, %xmm0, %rax
; NOGATHER-NEXT: vpinsrd $3, (%rax), %xmm3, %xmm3
@@ -272,14 +272,14 @@ declare <4 x float> @llvm.masked.gather.
define <4 x float> @masked_gather_v4float(<4 x float*> %ptrs, <4 x i1> %masks, <4 x float> %passthro) {
; X86-LABEL: masked_gather_v4float:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpslld $31, %xmm1, %xmm1
; X86-NEXT: vgatherdps %xmm1, (,%xmm0), %xmm2
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v4float:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpslld $31, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm1, (,%ymm0), %xmm2
; X64-NEXT: vmovaps %xmm2, %xmm0
@@ -287,26 +287,26 @@ define <4 x float> @masked_gather_v4floa
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v4float:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax
; NOGATHER-NEXT: # implicit-def: %xmm3
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm0, %rax
; NOGATHER-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB5_2: # %else
; NOGATHER-NEXT: vpextrb $4, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm0, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3]
; NOGATHER-NEXT: .LBB5_4: # %else2
; NOGATHER-NEXT: vpextrb $8, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm0, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],mem[0],xmm3[3]
@@ -314,7 +314,7 @@ define <4 x float> @masked_gather_v4floa
; NOGATHER-NEXT: vpextrb $12, %xmm1, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm0, %xmm0
; NOGATHER-NEXT: vpextrq $1, %xmm0, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],mem[0]
@@ -332,7 +332,7 @@ declare <8 x i32> @llvm.masked.gather.v8
define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i32> %passthro) {
; X86-LABEL: masked_gather_v8i32:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X86-NEXT: vpslld $31, %ymm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -342,7 +342,7 @@ define <8 x i32> @masked_gather_v8i32(<8
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v8i32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpslld $31, %ymm0, %ymm0
; X64-NEXT: vpsrad $31, %ymm0, %ymm0
@@ -356,21 +356,21 @@ define <8 x i32> @masked_gather_v8i32(<8
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v8i32:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4
; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB6_2: # %else
; NOGATHER-NEXT: vpextrb $2, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm4, %rax
; NOGATHER-NEXT: vpinsrd $1, (%rax), %xmm2, %xmm5
; NOGATHER-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
@@ -378,7 +378,7 @@ define <8 x i32> @masked_gather_v8i32(<8
; NOGATHER-NEXT: vpextrb $4, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm4, %xmm5
; NOGATHER-NEXT: vmovq %xmm5, %rax
; NOGATHER-NEXT: vpinsrd $2, (%rax), %xmm2, %xmm5
@@ -387,7 +387,7 @@ define <8 x i32> @masked_gather_v8i32(<8
; NOGATHER-NEXT: vpextrb $6, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm4, %xmm4
; NOGATHER-NEXT: vpextrq $1, %xmm4, %rax
; NOGATHER-NEXT: vpinsrd $3, (%rax), %xmm2, %xmm4
@@ -396,7 +396,7 @@ define <8 x i32> @masked_gather_v8i32(<8
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_10
-; NOGATHER-NEXT: # BB#9: # %cond.load10
+; NOGATHER-NEXT: # %bb.9: # %cond.load10
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
; NOGATHER-NEXT: vpinsrd $0, (%rax), %xmm4, %xmm4
@@ -405,7 +405,7 @@ define <8 x i32> @masked_gather_v8i32(<8
; NOGATHER-NEXT: vpextrb $10, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_12
-; NOGATHER-NEXT: # BB#11: # %cond.load13
+; NOGATHER-NEXT: # %bb.11: # %cond.load13
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
; NOGATHER-NEXT: vpinsrd $1, (%rax), %xmm4, %xmm4
@@ -414,7 +414,7 @@ define <8 x i32> @masked_gather_v8i32(<8
; NOGATHER-NEXT: vpextrb $12, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_14
-; NOGATHER-NEXT: # BB#13: # %cond.load16
+; NOGATHER-NEXT: # %bb.13: # %cond.load16
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
@@ -424,7 +424,7 @@ define <8 x i32> @masked_gather_v8i32(<8
; NOGATHER-NEXT: vpextrb $14, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_16
-; NOGATHER-NEXT: # BB#15: # %cond.load19
+; NOGATHER-NEXT: # %bb.15: # %cond.load19
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm3
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -448,7 +448,7 @@ declare <8 x float> @llvm.masked.gather.
define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <8 x float> %passthro) {
; X86-LABEL: masked_gather_v8float:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X86-NEXT: vpslld $31, %ymm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -458,7 +458,7 @@ define <8 x float> @masked_gather_v8floa
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v8float:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpslld $31, %ymm0, %ymm0
; X64-NEXT: vpsrad $31, %ymm0, %ymm0
@@ -472,21 +472,21 @@ define <8 x float> @masked_gather_v8floa
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v8float:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4
; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; NOGATHER-NEXT: .LBB7_2: # %else
; NOGATHER-NEXT: vpextrb $2, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm4, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm5 = xmm2[0],mem[0],xmm2[2,3]
; NOGATHER-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
@@ -494,7 +494,7 @@ define <8 x float> @masked_gather_v8floa
; NOGATHER-NEXT: vpextrb $4, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm4, %xmm5
; NOGATHER-NEXT: vmovq %xmm5, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm5 = xmm2[0,1],mem[0],xmm2[3]
@@ -503,7 +503,7 @@ define <8 x float> @masked_gather_v8floa
; NOGATHER-NEXT: vpextrb $6, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm4, %xmm4
; NOGATHER-NEXT: vpextrq $1, %xmm4, %rax
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm4 = xmm2[0,1,2],mem[0]
@@ -512,7 +512,7 @@ define <8 x float> @masked_gather_v8floa
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_10
-; NOGATHER-NEXT: # BB#9: # %cond.load10
+; NOGATHER-NEXT: # %bb.9: # %cond.load10
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm5
@@ -522,7 +522,7 @@ define <8 x float> @masked_gather_v8floa
; NOGATHER-NEXT: vpextrb $10, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_12
-; NOGATHER-NEXT: # BB#11: # %cond.load13
+; NOGATHER-NEXT: # %bb.11: # %cond.load13
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
; NOGATHER-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],mem[0],xmm4[2,3]
@@ -531,7 +531,7 @@ define <8 x float> @masked_gather_v8floa
; NOGATHER-NEXT: vpextrb $12, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_14
-; NOGATHER-NEXT: # BB#13: # %cond.load16
+; NOGATHER-NEXT: # %bb.13: # %cond.load16
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
@@ -541,7 +541,7 @@ define <8 x float> @masked_gather_v8floa
; NOGATHER-NEXT: vpextrb $14, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_16
-; NOGATHER-NEXT: # BB#15: # %cond.load19
+; NOGATHER-NEXT: # %bb.15: # %cond.load19
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm3
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -565,7 +565,7 @@ declare <4 x i64> @llvm.masked.gather.v4
define <4 x i64> @masked_gather_v4i64(<4 x i64*>* %ptr, <4 x i1> %masks, <4 x i64> %passthro) {
; X86-LABEL: masked_gather_v4i64:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: vpmovsxdq %xmm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -575,7 +575,7 @@ define <4 x i64> @masked_gather_v4i64(<4
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v4i64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpslld $31, %xmm0, %xmm0
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: vmovdqa (%rdi), %ymm2
@@ -584,20 +584,20 @@ define <4 x i64> @masked_gather_v4i64(<4
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v4i64:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; NOGATHER-NEXT: .LBB8_2: # %else
; NOGATHER-NEXT: vpextrb $4, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vpinsrq $1, (%rax), %xmm2, %xmm4
; NOGATHER-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
@@ -605,7 +605,7 @@ define <4 x i64> @masked_gather_v4i64(<4
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
@@ -615,7 +615,7 @@ define <4 x i64> @masked_gather_v4i64(<4
; NOGATHER-NEXT: vpextrb $12, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm3
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -640,7 +640,7 @@ declare <4 x double> @llvm.masked.gather
define <4 x double> @masked_gather_v4double(<4 x double*>* %ptr, <4 x i1> %masks, <4 x double> %passthro) {
; X86-LABEL: masked_gather_v4double:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: vpmovsxdq %xmm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -650,7 +650,7 @@ define <4 x double> @masked_gather_v4dou
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v4double:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpslld $31, %xmm0, %xmm0
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: vmovapd (%rdi), %ymm2
@@ -659,20 +659,20 @@ define <4 x double> @masked_gather_v4dou
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v4double:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; NOGATHER-NEXT: .LBB9_2: # %else
; NOGATHER-NEXT: vpextrb $4, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vmovhpd {{.*#+}} xmm4 = xmm2[0],mem[0]
; NOGATHER-NEXT: vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
@@ -680,7 +680,7 @@ define <4 x double> @masked_gather_v4dou
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_6
-; NOGATHER-NEXT: # BB#5: # %cond.load4
+; NOGATHER-NEXT: # %bb.5: # %cond.load4
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm4
; NOGATHER-NEXT: vmovq %xmm4, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm4
@@ -690,7 +690,7 @@ define <4 x double> @masked_gather_v4dou
; NOGATHER-NEXT: vpextrb $12, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_8
-; NOGATHER-NEXT: # BB#7: # %cond.load7
+; NOGATHER-NEXT: # %bb.7: # %cond.load7
; NOGATHER-NEXT: vextractf128 $1, %ymm3, %xmm3
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -715,7 +715,7 @@ declare <2 x i64> @llvm.masked.gather.v2
define <2 x i64> @masked_gather_v2i64(<2 x i64*>* %ptr, <2 x i1> %masks, <2 x i64> %passthro) {
; X86-LABEL: masked_gather_v2i64:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovsxdq (%eax), %xmm2
; X86-NEXT: vpsllq $63, %xmm0, %xmm0
@@ -724,7 +724,7 @@ define <2 x i64> @masked_gather_v2i64(<2
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2i64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsllq $63, %xmm0, %xmm0
; X64-NEXT: vmovdqa (%rdi), %xmm2
; X64-NEXT: vpgatherqq %xmm0, (,%xmm2), %xmm1
@@ -732,20 +732,20 @@ define <2 x i64> @masked_gather_v2i64(<2
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2i64:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB10_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; NOGATHER-NEXT: .LBB10_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB10_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vpinsrq $1, (%rax), %xmm2, %xmm2
; NOGATHER-NEXT: .LBB10_4: # %else2
@@ -762,7 +762,7 @@ declare <2 x double> @llvm.masked.gather
define <2 x double> @masked_gather_v2double(<2 x double*>* %ptr, <2 x i1> %masks, <2 x double> %passthro) {
; X86-LABEL: masked_gather_v2double:
-; X86: # BB#0: # %entry
+; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovsxdq (%eax), %xmm2
; X86-NEXT: vpsllq $63, %xmm0, %xmm0
@@ -771,7 +771,7 @@ define <2 x double> @masked_gather_v2dou
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2double:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsllq $63, %xmm0, %xmm0
; X64-NEXT: vmovapd (%rdi), %xmm2
; X64-NEXT: vgatherqpd %xmm0, (,%xmm2), %xmm1
@@ -779,20 +779,20 @@ define <2 x double> @masked_gather_v2dou
; X64-NEXT: retq
;
; NOGATHER-LABEL: masked_gather_v2double:
-; NOGATHER: # BB#0: # %entry
+; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB11_2
-; NOGATHER-NEXT: # BB#1: # %cond.load
+; NOGATHER-NEXT: # %bb.1: # %cond.load
; NOGATHER-NEXT: vmovq %xmm3, %rax
; NOGATHER-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; NOGATHER-NEXT: .LBB11_2: # %else
; NOGATHER-NEXT: vpextrb $8, %xmm0, %eax
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB11_4
-; NOGATHER-NEXT: # BB#3: # %cond.load1
+; NOGATHER-NEXT: # %bb.3: # %cond.load1
; NOGATHER-NEXT: vpextrq $1, %xmm3, %rax
; NOGATHER-NEXT: vmovhpd {{.*#+}} xmm2 = xmm2[0],mem[0]
; NOGATHER-NEXT: .LBB11_4: # %else2
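
For readers skimming the NOGATHER checks above: with no hardware gather available, the masked gather is scalarized into a per-lane test of the mask byte followed by a conditional element load, and masked-off lanes keep the passthrough value. A minimal Python model of that semantics (an illustration only; the names are ours, not LLVM's):

    def masked_gather(ptrs, masks, passthru, load):
        # One conditional load per lane, mirroring the testb/je plus
        # vpinsr/vinsertps ladders in the NOGATHER output above; lanes
        # whose mask bit is clear keep the passthrough element.
        return [load(p) if m else pt
                for p, m, pt in zip(ptrs, masks, passthru)]

    # Example: gather lanes 0 and 2 of a small table of "addresses".
    table = {0: 10.0, 8: 20.0, 16: 30.0, 24: 40.0}
    print(masked_gather([0, 8, 16, 24],
                        [True, False, True, False],
                        [-1.0, -1.0, -1.0, -1.0],
                        table.__getitem__))  # [10.0, -1.0, 30.0, -1.0]
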
Modified: llvm/trunk/test/CodeGen/X86/avx2-nontemporal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-nontemporal.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-nontemporal.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-nontemporal.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define void @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %F, <16 x i16> %G, <32 x i8> %H) nounwind {
; X32-LABEL: f:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-32, %esp
@@ -31,7 +31,7 @@ define void @f(<8 x float> %A, i8* %B, <
; X32-NEXT: retl
;
; X64-LABEL: f:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovntps %ymm0, (%rdi)
; X64-NEXT: vpaddq {{.*}}(%rip), %ymm2, %ymm0
Modified: llvm/trunk/test/CodeGen/X86/avx2-phaddsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-phaddsub.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-phaddsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-phaddsub.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <16 x i16> @phaddw1(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phaddw1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddw1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
@@ -20,12 +20,12 @@ define <16 x i16> @phaddw1(<16 x i16> %x
define <16 x i16> @phaddw2(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phaddw2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddw2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 17, i32 19, i32 21, i32 23, i32 9, i32 11, i32 13, i32 15, i32 25, i32 27, i32 29, i32 31>
@@ -36,12 +36,12 @@ define <16 x i16> @phaddw2(<16 x i16> %x
define <8 x i32> @phaddd1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phaddd1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -52,12 +52,12 @@ define <8 x i32> @phaddd1(<8 x i32> %x,
define <8 x i32> @phaddd2(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phaddd2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 1, i32 2, i32 9, i32 10, i32 5, i32 6, i32 13, i32 14>
@@ -68,12 +68,12 @@ define <8 x i32> @phaddd2(<8 x i32> %x,
define <8 x i32> @phaddd3(<8 x i32> %x) {
; X32-LABEL: phaddd3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 undef, i32 2, i32 8, i32 10, i32 4, i32 6, i32 undef, i32 14>
@@ -84,12 +84,12 @@ define <8 x i32> @phaddd3(<8 x i32> %x)
define <16 x i16> @phsubw1(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phsubw1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphsubw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubw1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphsubw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
@@ -100,12 +100,12 @@ define <16 x i16> @phsubw1(<16 x i16> %x
define <8 x i32> @phsubd1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phsubd1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubd1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -116,12 +116,12 @@ define <8 x i32> @phsubd1(<8 x i32> %x,
define <8 x i32> @phsubd2(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phsubd2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubd2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 undef, i32 8, i32 undef, i32 4, i32 6, i32 12, i32 14>
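
Every hunk in these test updates is the same mechanical substitution: the basic-block comment printed in the assembly changes from "BB#N" to "%bb.N". The CHECK lines are typically regenerated with utils/update_llc_test_checks.py rather than edited by hand, but the textual effect is equivalent to the following sketch (a hypothetical one-off script, not part of the patch):

    import re
    import sys

    BB_REF = re.compile(r'BB#(\d+)')

    def migrate(text):
        # "# BB#0:", "## BB#0:", and "# BB#5: # %cond.load4" all become
        # the new "%bb.N" reference form; everything else is untouched.
        return BB_REF.sub(lambda m: '%bb.' + m.group(1), text)

    for path in sys.argv[1:]:
        with open(path) as f:
            src = f.read()
        with open(path, 'w') as f:
            f.write(migrate(src))
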
Modified: llvm/trunk/test/CodeGen/X86/avx2-pmovxrm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-pmovxrm.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-pmovxrm.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-pmovxrm.ll Mon Dec 4 09:18:51 2017
@@ -6,13 +6,13 @@
define <16 x i16> @test_llvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxbw:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbw (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxbw:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxbw (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -22,13 +22,13 @@ define <16 x i16> @test_llvm_x86_avx2_pm
define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxbd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxbd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxbd (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -39,13 +39,13 @@ define <8 x i32> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxbq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxbq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxbq (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -56,13 +56,13 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxwd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxwd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxwd (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -72,13 +72,13 @@ define <8 x i32> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxwq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxwq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxwq (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -89,13 +89,13 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovsxdq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxdq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovsxdq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovsxdq (%rdi), %ymm0
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a, align 1
@@ -105,13 +105,13 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
define <16 x i16> @test_llvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxbw:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxbw:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -121,13 +121,13 @@ define <16 x i16> @test_llvm_x86_avx2_pm
define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxbd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxbd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -138,13 +138,13 @@ define <8 x i32> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxbq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxbq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a, align 1
@@ -155,13 +155,13 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxwd:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxwd:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -171,13 +171,13 @@ define <8 x i32> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxwq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxwq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; X64-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a, align 1
@@ -188,13 +188,13 @@ define <4 x i64> @test_llvm_x86_avx2_pmo
define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(<4 x i32>* %a) {
; X32-LABEL: test_llvm_x86_avx2_pmovzxdq:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X32-NEXT: retl
;
; X64-LABEL: test_llvm_x86_avx2_pmovzxdq:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-NEXT: retq
%1 = load <4 x i32>, <4 x i32>* %a, align 1
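
The avx2-schedule.ll diff below carries the same BB#0 to %bb.0 change, but its CHECK lines also encode per-CPU scheduling data: each "sched: [A:B]" comment pairs instruction latency in cycles with reciprocal throughput, and "?" marks a value the scheduling model leaves unspecified. A small decoder, offered as a reader's aid under that reading of the notation:

    import re

    SCHED = re.compile(r'sched: \[([0-9?]+):([0-9.?]+)\]')

    def decode(check_line):
        # E.g. "vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]" decodes to
        # latency 1 cycle, reciprocal throughput 0.33 cycles/instruction.
        m = SCHED.search(check_line)
        if not m:
            return None
        return {'latency': m.group(1), 'rthroughput': m.group(2)}

    print(decode('; SKX-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]'))
    print(decode('; ZNVER1-NEXT: vgatherdps ... # sched: [100:?]'))
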
Modified: llvm/trunk/test/CodeGen/X86/avx2-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-schedule.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-schedule.ll Mon Dec 4 09:18:51 2017
@@ -8,37 +8,37 @@
define <8 x i32> @test_broadcasti128(<8 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_broadcasti128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [4:0.50]
; GENERIC-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcasti128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [1:0.50]
; HASWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcasti128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [6:0.50]
; BROADWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcasti128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [7:0.50]
; SKYLAKE-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcasti128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [7:0.50]
; SKX-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_broadcasti128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [8:0.50]
; ZNVER1-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -50,37 +50,37 @@ define <8 x i32> @test_broadcasti128(<8
define <4 x double> @test_broadcastsd_ymm(<2 x double> %a0) {
; GENERIC-LABEL: test_broadcastsd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastsd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcastsd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcastsd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcastsd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_broadcastsd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [100:0.25]
; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -91,37 +91,37 @@ define <4 x double> @test_broadcastsd_ym
define <4 x float> @test_broadcastss(<4 x float> %a0) {
; GENERIC-LABEL: test_broadcastss:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastss:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcastss:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcastss:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcastss:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_broadcastss:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -132,37 +132,37 @@ define <4 x float> @test_broadcastss(<4
define <8 x float> @test_broadcastss_ymm(<4 x float> %a0) {
; GENERIC-LABEL: test_broadcastss_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_broadcastss_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_broadcastss_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_broadcastss_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_broadcastss_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_broadcastss_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [100:0.25]
; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -173,7 +173,7 @@ define <8 x float> @test_broadcastss_ymm
define <4 x i32> @test_extracti128(<8 x i32> %a0, <8 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_extracti128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [3:1.00]
; GENERIC-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [1:1.00]
@@ -182,7 +182,7 @@ define <4 x i32> @test_extracti128(<8 x
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_extracti128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.50]
; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [3:1.00]
@@ -191,7 +191,7 @@ define <4 x i32> @test_extracti128(<8 x
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_extracti128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [3:1.00]
@@ -200,7 +200,7 @@ define <4 x i32> @test_extracti128(<8 x
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_extracti128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [3:1.00]
@@ -209,7 +209,7 @@ define <4 x i32> @test_extracti128(<8 x
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_extracti128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.33]
; SKX-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [3:1.00]
@@ -218,7 +218,7 @@ define <4 x i32> @test_extracti128(<8 x
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_extracti128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [2:0.25]
@@ -235,32 +235,32 @@ define <4 x i32> @test_extracti128(<8 x
define <2 x double> @test_gatherdpd(<2 x double> %a0, i8* %a1, <4 x i32> %a2, <2 x double> %a3) {
; GENERIC-LABEL: test_gatherdpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherdpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherdpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [25:3.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherdpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherdpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherdpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0, i8* %a1, <4 x i32> %a2, <2 x double> %a3, i8 2)
@@ -270,32 +270,32 @@ declare <2 x double> @llvm.x86.avx2.gath
define <4 x double> @test_gatherdpd_ymm(<4 x double> %a0, i8* %a1, <4 x i32> %a2, <4 x double> %a3) {
; GENERIC-LABEL: test_gatherdpd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherdpd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherdpd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [26:5.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherdpd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherdpd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherdpd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0, i8* %a1, <4 x i32> %a2, <4 x double> %a3, i8 8)
@@ -305,32 +305,32 @@ declare <4 x double> @llvm.x86.avx2.gath
define <4 x float> @test_gatherdps(<4 x float> %a0, i8* %a1, <4 x i32> %a2, <4 x float> %a3) {
; GENERIC-LABEL: test_gatherdps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherdps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherdps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [25:3.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherdps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherdps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherdps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0, i8* %a1, <4 x i32> %a2, <4 x float> %a3, i8 2)
@@ -340,32 +340,32 @@ declare <4 x float> @llvm.x86.avx2.gathe
define <8 x float> @test_gatherdps_ymm(<8 x float> %a0, i8* %a1, <8 x i32> %a2, <8 x float> %a3) {
; GENERIC-LABEL: test_gatherdps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherdps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherdps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [26:4.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherdps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherdps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherdps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0, i8* %a1, <8 x i32> %a2, <8 x float> %a3, i8 4)
@@ -375,32 +375,32 @@ declare <8 x float> @llvm.x86.avx2.gathe
define <2 x double> @test_gatherqpd(<2 x double> %a0, i8* %a1, <2 x i64> %a2, <2 x double> %a3) {
; GENERIC-LABEL: test_gatherqpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherqpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherqpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:3.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherqpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherqpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherqpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0, i8* %a1, <2 x i64> %a2, <2 x double> %a3, i8 2)
@@ -410,32 +410,32 @@ declare <2 x double> @llvm.x86.avx2.gath
define <4 x double> @test_gatherqpd_ymm(<4 x double> %a0, i8* %a1, <4 x i64> %a2, <4 x double> %a3) {
; GENERIC-LABEL: test_gatherqpd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherqpd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherqpd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [23:3.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherqpd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherqpd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherqpd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0, i8* %a1, <4 x i64> %a2, <4 x double> %a3, i8 8)
@@ -445,32 +445,32 @@ declare <4 x double> @llvm.x86.avx2.gath
define <4 x float> @test_gatherqps(<4 x float> %a0, i8* %a1, <2 x i64> %a2, <4 x float> %a3) {
; GENERIC-LABEL: test_gatherqps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherqps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherqps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [27:5.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherqps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherqps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherqps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0, i8* %a1, <2 x i64> %a2, <4 x float> %a3, i8 2)
@@ -480,37 +480,37 @@ declare <4 x float> @llvm.x86.avx2.gathe
define <4 x float> @test_gatherqps_ymm(<4 x float> %a0, i8* %a1, <4 x i64> %a2, <4 x float> %a3) {
; GENERIC-LABEL: test_gatherqps_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_gatherqps_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [1:?]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_gatherqps_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [24:5.00]
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_gatherqps_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [25:1.00]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_gatherqps_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [25:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_gatherqps_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -521,42 +521,42 @@ declare <4 x float> @llvm.x86.avx2.gathe
define <8 x i32> @test_inserti128(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_inserti128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_inserti128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_inserti128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; BROADWELL-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_inserti128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKYLAKE-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_inserti128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKX-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; SKX-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_inserti128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [2:0.25]
; ZNVER1-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [9:0.50]
; ZNVER1-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -572,32 +572,32 @@ define <8 x i32> @test_inserti128(<8 x i
define <4 x i64> @test_movntdqa(i8* %a0) {
; GENERIC-LABEL: test_movntdqa:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [4:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_movntdqa:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_movntdqa:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [6:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_movntdqa:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_movntdqa:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_movntdqa:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i64> @llvm.x86.avx2.movntdqa(i8* %a0)
@@ -607,37 +607,37 @@ declare <4 x i64> @llvm.x86.avx2.movntdq
define <16 x i16> @test_mpsadbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_mpsadbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_mpsadbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [7:2.00]
; HASWELL-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_mpsadbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [7:2.00]
; BROADWELL-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [13:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_mpsadbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [4:2.00]
; SKYLAKE-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_mpsadbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [4:2.00]
; SKX-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [11:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_mpsadbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -651,42 +651,42 @@ declare <16 x i16> @llvm.x86.avx2.mpsadb
define <32 x i8> @test_pabsb(<32 x i8> %a0, <32 x i8> *%a1) {
; GENERIC-LABEL: test_pabsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpabsb %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpabsb (%rdi), %ymm1 # sched: [7:1.00]
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpabsb (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpabsb (%rdi), %ymm1 # sched: [7:0.50]
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50]
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50]
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pabsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -701,42 +701,42 @@ declare <32 x i8> @llvm.x86.avx2.pabs.b(
define <8 x i32> @test_pabsd(<8 x i32> %a0, <8 x i32> *%a1) {
; GENERIC-LABEL: test_pabsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpabsd %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpabsd (%rdi), %ymm1 # sched: [7:1.00]
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpabsd (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpabsd (%rdi), %ymm1 # sched: [7:0.50]
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50]
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50]
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pabsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -751,42 +751,42 @@ declare <8 x i32> @llvm.x86.avx2.pabs.d(
define <16 x i16> @test_pabsw(<16 x i16> %a0, <16 x i16> *%a1) {
; GENERIC-LABEL: test_pabsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpabsw %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpabsw (%rdi), %ymm1 # sched: [7:1.00]
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pabsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpabsw (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pabsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpabsw (%rdi), %ymm1 # sched: [7:0.50]
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pabsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50]
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pabsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50]
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pabsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -801,37 +801,37 @@ declare <16 x i16> @llvm.x86.avx2.pabs.w
define <16 x i16> @test_packssdw(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_packssdw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packssdw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packssdw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packssdw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packssdw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_packssdw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -845,37 +845,37 @@ declare <16 x i16> @llvm.x86.avx2.packss
define <32 x i8> @test_packsswb(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_packsswb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packsswb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packsswb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packsswb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packsswb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_packsswb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -889,37 +889,37 @@ declare <32 x i8> @llvm.x86.avx2.packssw
define <16 x i16> @test_packusdw(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_packusdw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packusdw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packusdw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packusdw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packusdw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_packusdw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -933,37 +933,37 @@ declare <16 x i16> @llvm.x86.avx2.packus
define <32 x i8> @test_packuswb(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_packuswb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packuswb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_packuswb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_packuswb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_packuswb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_packuswb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -977,37 +977,37 @@ declare <32 x i8> @llvm.x86.avx2.packusw
define <32 x i8> @test_paddb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_paddb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1019,37 +1019,37 @@ define <32 x i8> @test_paddb(<32 x i8> %
define <8 x i32> @test_paddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_paddd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1061,37 +1061,37 @@ define <8 x i32> @test_paddd(<8 x i32> %
define <4 x i64> @test_paddq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_paddq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1103,37 +1103,37 @@ define <4 x i64> @test_paddq(<4 x i64> %
define <32 x i8> @test_paddsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_paddsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1146,37 +1146,37 @@ declare <32 x i8> @llvm.x86.avx2.padds.b
define <16 x i16> @test_paddsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_paddsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1189,37 +1189,37 @@ declare <16 x i16> @llvm.x86.avx2.padds.
define <32 x i8> @test_paddusb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_paddusb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddusb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddusb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddusb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddusb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1232,37 +1232,37 @@ declare <32 x i8> @llvm.x86.avx2.paddus.
define <16 x i16> @test_paddusw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_paddusw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddusw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddusw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddusw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddusw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1275,37 +1275,37 @@ declare <16 x i16> @llvm.x86.avx2.paddus
define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_paddw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_paddw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_paddw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_paddw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_paddw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1317,37 +1317,37 @@ define <16 x i16> @test_paddw(<16 x i16>
define <32 x i8> @test_palignr(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_palignr:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; GENERIC-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_palignr:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; HASWELL-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_palignr:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; BROADWELL-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_palignr:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; SKYLAKE-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_palignr:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; SKX-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_palignr:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:0.25]
; ZNVER1-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1359,42 +1359,42 @@ define <32 x i8> @test_palignr(<32 x i8>
define <4 x i64> @test_pand(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pand:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pand:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pand:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pand:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pand:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pand:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1408,42 +1408,42 @@ define <4 x i64> @test_pand(<4 x i64> %a
define <4 x i64> @test_pandn(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pandn:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pandn:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pandn:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [7:0.50]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pandn:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pandn:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pandn:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1459,37 +1459,37 @@ define <4 x i64> @test_pandn(<4 x i64> %
define <32 x i8> @test_pavgb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pavgb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pavgb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pavgb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pavgb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pavgb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1511,37 +1511,37 @@ define <32 x i8> @test_pavgb(<32 x i8> %
define <16 x i16> @test_pavgw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pavgw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pavgw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pavgw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pavgw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pavgw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1563,42 +1563,42 @@ define <16 x i16> @test_pavgw(<16 x i16>
define <4 x i32> @test_pblendd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pblendd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.50]
; GENERIC-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [5:0.50]
; GENERIC-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33]
; HASWELL-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [1:0.50]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pblendd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33]
; BROADWELL-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [6:0.50]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pblendd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33]
; SKYLAKE-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [7:0.50]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pblendd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33]
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [7:0.50]
; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pblendd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.50]
; ZNVER1-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [8:1.00]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1612,42 +1612,42 @@ define <4 x i32> @test_pblendd(<4 x i32>
define <8 x i32> @test_pblendd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pblendd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.50]
; GENERIC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [5:0.50]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33]
; HASWELL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [1:0.50]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pblendd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33]
; BROADWELL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [7:0.50]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pblendd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33]
; SKYLAKE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [8:0.50]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pblendd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33]
; SKX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [8:0.50]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pblendd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.50]
; ZNVER1-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [9:1.50]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1661,37 +1661,37 @@ define <8 x i32> @test_pblendd_ymm(<8 x
define <32 x i8> @test_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2, <32 x i8> *%a3, <32 x i8> %a4) {
; GENERIC-LABEL: test_pblendvb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
; GENERIC-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendvb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; HASWELL-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [2:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pblendvb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; BROADWELL-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pblendvb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
; SKYLAKE-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [8:0.67]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pblendvb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:0.67]
; SKX-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [8:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pblendvb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; ZNVER1-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1704,37 +1704,37 @@ declare <32 x i8> @llvm.x86.avx2.pblendv
define <16 x i16> @test_pblendw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pblendw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:0.50]
; GENERIC-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00]
; HASWELL-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pblendw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00]
; BROADWELL-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pblendw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00]
; SKYLAKE-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pblendw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00]
; SKX-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pblendw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [2:0.33]
; ZNVER1-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [9:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -1746,42 +1746,42 @@ define <16 x i16> @test_pblendw(<16 x i1
define <16 x i8> @test_pbroadcastb(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pbroadcastb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [4:1.00]
; HASWELL-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [9:1.00]
; BROADWELL-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [7:1.00]
; SKYLAKE-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [7:1.00]
; SKX-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastb (%rdi), %xmm1 # sched: [8:1.00]
; ZNVER1-NEXT: vpbroadcastb %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1795,42 +1795,42 @@ define <16 x i8> @test_pbroadcastb(<16 x
define <32 x i8> @test_pbroadcastb_ymm(<32 x i8> %a0, <32 x i8> *%a1) {
; GENERIC-LABEL: test_pbroadcastb_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastb_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [4:1.00]
; HASWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastb_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [9:1.00]
; BROADWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastb_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastb_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [8:1.00]
; SKX-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastb_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastb (%rdi), %ymm1 # sched: [8:2.00]
; ZNVER1-NEXT: vpbroadcastb %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1844,41 +1844,41 @@ define <32 x i8> @test_pbroadcastb_ymm(<
define <4 x i32> @test_pbroadcastd(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_pbroadcastd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastd (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpbroadcastd (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpbroadcastd (%rdi), %xmm1 # sched: [5:0.50]
; BROADWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpbroadcastd (%rdi), %xmm1 # sched: [6:0.50]
; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpaddd (%rdi){1to4}, %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpbroadcastd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1892,41 +1892,41 @@ define <4 x i32> @test_pbroadcastd(<4 x
define <8 x i32> @test_pbroadcastd_ymm(<8 x i32> %a0, <8 x i32> *%a1) {
; GENERIC-LABEL: test_pbroadcastd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastd (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastd (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastd (%rdi), %ymm1 # sched: [6:0.50]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastd (%rdi), %ymm1 # sched: [7:0.50]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpaddd (%rdi){1to8}, %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpbroadcastd %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -1940,41 +1940,41 @@ define <8 x i32> @test_pbroadcastd_ymm(<
define <2 x i64> @test_pbroadcastq(<2 x i64> %a0, <2 x i64> *%a1) {
; GENERIC-LABEL: test_pbroadcastq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastq (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpbroadcastq (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpbroadcastq (%rdi), %xmm1 # sched: [5:0.50]
; BROADWELL-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpbroadcastq (%rdi), %xmm1 # sched: [6:0.50]
; SKYLAKE-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; SKX-NEXT: vpaddq (%rdi){1to2}, %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpbroadcastq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -1988,41 +1988,41 @@ define <2 x i64> @test_pbroadcastq(<2 x
define <4 x i64> @test_pbroadcastq_ymm(<4 x i64> %a0, <4 x i64> *%a1) {
; GENERIC-LABEL: test_pbroadcastq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastq (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastq (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastq (%rdi), %ymm1 # sched: [6:0.50]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastq (%rdi), %ymm1 # sched: [7:0.50]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpaddq (%rdi){1to4}, %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastq (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpbroadcastq %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -2036,42 +2036,42 @@ define <4 x i64> @test_pbroadcastq_ymm(<
define <8 x i16> @test_pbroadcastw(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pbroadcastw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [4:1.00]
; HASWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [9:1.00]
; BROADWELL-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [7:1.00]
; SKYLAKE-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [7:1.00]
; SKX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastw (%rdi), %xmm1 # sched: [8:1.00]
; ZNVER1-NEXT: vpbroadcastw %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
@@ -2085,42 +2085,42 @@ define <8 x i16> @test_pbroadcastw(<8 x
define <16 x i16> @test_pbroadcastw_ymm(<16 x i16> %a0, <16 x i16> *%a1) {
; GENERIC-LABEL: test_pbroadcastw_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastw_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [4:1.00]
; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pbroadcastw_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [9:1.00]
; BROADWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastw_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pbroadcastw_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [8:1.00]
; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pbroadcastw_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpbroadcastw (%rdi), %ymm1 # sched: [8:2.00]
; ZNVER1-NEXT: vpbroadcastw %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -2134,31 +2134,31 @@ define <16 x i16> @test_pbroadcastw_ymm(
define <32 x i8> @test_pcmpeqb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpeqb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2b %k0, %ymm0
; SKX-NEXT: vpcmpeqb (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2166,7 +2166,7 @@ define <32 x i8> @test_pcmpeqb(<32 x i8>
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpeqb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2180,31 +2180,31 @@ define <32 x i8> @test_pcmpeqb(<32 x i8>
define <8 x i32> @test_pcmpeqd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pcmpeqd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2212,7 +2212,7 @@ define <8 x i32> @test_pcmpeqd(<8 x i32>
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpeqd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2226,31 +2226,31 @@ define <8 x i32> @test_pcmpeqd(<8 x i32>
define <4 x i64> @test_pcmpeqq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pcmpeqq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2q %k0, %ymm0
; SKX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2258,7 +2258,7 @@ define <4 x i64> @test_pcmpeqq(<4 x i64>
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpeqq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2272,31 +2272,31 @@ define <4 x i64> @test_pcmpeqq(<4 x i64>
define <16 x i16> @test_pcmpeqw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pcmpeqw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpeqw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpeqw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2w %k0, %ymm0
; SKX-NEXT: vpcmpeqw (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2304,7 +2304,7 @@ define <16 x i16> @test_pcmpeqw(<16 x i1
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpeqw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2318,31 +2318,31 @@ define <16 x i16> @test_pcmpeqw(<16 x i1
define <32 x i8> @test_pcmpgtb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpgtb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtb %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2b %k0, %ymm0
; SKX-NEXT: vpcmpgtb (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2350,7 +2350,7 @@ define <32 x i8> @test_pcmpgtb(<32 x i8>
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpgtb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2364,31 +2364,31 @@ define <32 x i8> @test_pcmpgtb(<32 x i8>
define <8 x i32> @test_pcmpgtd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pcmpgtd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2396,7 +2396,7 @@ define <8 x i32> @test_pcmpgtd(<8 x i32>
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpgtd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2410,31 +2410,31 @@ define <8 x i32> @test_pcmpgtd(<8 x i32>
define <4 x i64> @test_pcmpgtq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pcmpgtq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2q %k0, %ymm0
; SKX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2442,7 +2442,7 @@ define <4 x i64> @test_pcmpgtq(<4 x i64>
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpgtq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2456,31 +2456,31 @@ define <4 x i64> @test_pcmpgtq(<4 x i64>
define <16 x i16> @test_pcmpgtw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pcmpgtw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pcmpgtw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pcmpgtw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpmovm2w %k0, %ymm0
; SKX-NEXT: vpcmpgtw (%rdi), %ymm0, %k0 # sched: [10:1.00]
@@ -2488,7 +2488,7 @@ define <16 x i16> @test_pcmpgtw(<16 x i1
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pcmpgtw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2502,42 +2502,42 @@ define <16 x i16> @test_pcmpgtw(<16 x i1
define <4 x i64> @test_perm2i128(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_perm2i128:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_perm2i128:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; HASWELL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_perm2i128:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; BROADWELL-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [9:1.00]
; BROADWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_perm2i128:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKYLAKE-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [10:1.00]
; SKYLAKE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_perm2i128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [10:1.00]
; SKX-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_perm2i128:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [2:0.25]
; ZNVER1-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [9:0.50]
; ZNVER1-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -2551,42 +2551,42 @@ define <4 x i64> @test_perm2i128(<4 x i6
define <8 x i32> @test_permd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_permd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; BROADWELL-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKYLAKE-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKYLAKE-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKX-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKX-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_permd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermd %ymm1, %ymm0, %ymm1 # sched: [2:0.25]
; ZNVER1-NEXT: vpermd (%rdi), %ymm0, %ymm0 # sched: [9:0.50]
; ZNVER1-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -2601,42 +2601,42 @@ declare <8 x i32> @llvm.x86.avx2.permd(<
define <4 x double> @test_permpd(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_permpd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [1:1.00]
; GENERIC-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [5:1.00]
; GENERIC-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permpd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; HASWELL-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [3:1.00]
; HASWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permpd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; BROADWELL-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [9:1.00]
; BROADWELL-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permpd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; SKYLAKE-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [10:1.00]
; SKYLAKE-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; SKX-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [10:1.00]
; SKX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_permpd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [107:0.50]
; ZNVER1-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [100:0.25]
; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
@@ -2650,42 +2650,42 @@ define <4 x double> @test_permpd(<4 x do
define <8 x float> @test_permps(<8 x i32> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_permps:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permps:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permps:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; BROADWELL-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; BROADWELL-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permps:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKYLAKE-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKYLAKE-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permps:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKX-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKX-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_permps:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermps %ymm1, %ymm0, %ymm1 # sched: [100:0.25]
; ZNVER1-NEXT: vpermps (%rdi), %ymm0, %ymm0 # sched: [107:0.50]
; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
@@ -2700,42 +2700,42 @@ declare <8 x float> @llvm.x86.avx2.permp
define <4 x i64> @test_permq(<4 x i64> %a0, <4 x i64> *%a1) {
; GENERIC-LABEL: test_permq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [1:1.00]
; GENERIC-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; HASWELL-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_permq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; BROADWELL-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [9:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_permq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; SKYLAKE-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [10:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_permq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; SKX-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [10:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_permq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [9:0.50]
; ZNVER1-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [2:0.25]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -2749,32 +2749,32 @@ define <4 x i64> @test_permq(<4 x i64> %
define <4 x i32> @test_pgatherdd(<4 x i32> %a0, i8* %a1, <4 x i32> %a2, <4 x i32> %a3) {
; GENERIC-LABEL: test_pgatherdd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherdd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherdd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherdd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherdd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %a0, i8* %a1, <4 x i32> %a2, <4 x i32> %a3, i8 2)
@@ -2784,32 +2784,32 @@ declare <4 x i32> @llvm.x86.avx2.gather.
define <8 x i32> @test_pgatherdd_ymm(<8 x i32> %a0, i8* %a1, <8 x i32> %a2, <8 x i32> %a3) {
; GENERIC-LABEL: test_pgatherdd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherdd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherdd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherdd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherdd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %a0, i8* %a1, <8 x i32> %a2, <8 x i32> %a3, i8 2)
@@ -2819,32 +2819,32 @@ declare <8 x i32> @llvm.x86.avx2.gather.
define <2 x i64> @test_pgatherdq(<2 x i64> %a0, i8* %a1, <4 x i32> %a2, <2 x i64> %a3) {
; GENERIC-LABEL: test_pgatherdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0, i8* %a1, <4 x i32> %a2, <2 x i64> %a3, i8 2)
@@ -2854,32 +2854,32 @@ declare <2 x i64> @llvm.x86.avx2.gather.
define <4 x i64> @test_pgatherdq_ymm(<4 x i64> %a0, i8* %a1, <4 x i32> %a2, <4 x i64> %a3) {
; GENERIC-LABEL: test_pgatherdq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherdq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherdq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherdq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherdq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0, i8* %a1, <4 x i32> %a2, <4 x i64> %a3, i8 2)
@@ -2889,32 +2889,32 @@ declare <4 x i64> @llvm.x86.avx2.gather.
define <4 x i32> @test_pgatherqd(<4 x i32> %a0, i8* %a1, <2 x i64> %a2, <4 x i32> %a3) {
; GENERIC-LABEL: test_pgatherqd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherqd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherqd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherqd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherqd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %a0, i8* %a1, <2 x i64> %a2, <4 x i32> %a3, i8 2)
@@ -2924,37 +2924,37 @@ declare <4 x i32> @llvm.x86.avx2.gather.
define <4 x i32> @test_pgatherqd_ymm(<4 x i32> %a0, i8* %a1, <4 x i64> %a2, <4 x i32> %a3) {
; GENERIC-LABEL: test_pgatherqd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherqd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherqd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [25:1.00]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherqd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [25:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherqd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -2965,32 +2965,32 @@ declare <4 x i32> @llvm.x86.avx2.gather.
define <2 x i64> @test_pgatherqq(<2 x i64> %a0, i8 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; GENERIC-LABEL: test_pgatherqq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherqq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherqq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherqq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [22:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherqq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0, i8* %a1, <2 x i64> %a2, <2 x i64> %a3, i8 2)
@@ -3000,32 +3000,32 @@ declare <2 x i64> @llvm.x86.avx2.gather.
define <4 x i64> @test_pgatherqq_ymm(<4 x i64> %a0, i8 *%a1, <4 x i64> %a2, <4 x i64> %a3) {
; GENERIC-LABEL: test_pgatherqq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pgatherqq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pgatherqq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [25:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pgatherqq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [25:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pgatherqq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0, i8* %a1, <4 x i64> %a2, <4 x i64> %a3, i8 2)
@@ -3035,37 +3035,37 @@ declare <4 x i64> @llvm.x86.avx2.gather.
define <8 x i32> @test_phaddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_phaddd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phaddd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3078,37 +3078,37 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d
define <16 x i16> @test_phaddsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phaddsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phaddsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3121,37 +3121,37 @@ declare <16 x i16> @llvm.x86.avx2.phadd.
define <16 x i16> @test_phaddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phaddw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phaddw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phaddw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phaddw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phaddw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3164,37 +3164,37 @@ declare <16 x i16> @llvm.x86.avx2.phadd.
define <8 x i32> @test_phsubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_phsubd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phsubd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3207,37 +3207,37 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d
define <16 x i16> @test_phsubsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phsubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phsubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3250,37 +3250,37 @@ declare <16 x i16> @llvm.x86.avx2.phsub.
define <16 x i16> @test_phsubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phsubw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_phsubw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_phsubw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_phsubw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKX-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_phsubw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3293,37 +3293,37 @@ declare <16 x i16> @llvm.x86.avx2.phsub.
define <16 x i16> @test_pmaddubsw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pmaddubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaddubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaddubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaddubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaddubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3337,37 +3337,37 @@ declare <16 x i16> @llvm.x86.avx2.pmadd.
define <8 x i32> @test_pmaddwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmaddwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaddwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaddwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaddwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaddwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3381,42 +3381,42 @@ declare <8 x i32> @llvm.x86.avx2.pmadd.w
define <4 x i32> @test_pmaskmovd(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) {
; GENERIC-LABEL: test_pmaskmovd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2
; GENERIC-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; GENERIC-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaskmovd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [7:2.00]
; BROADWELL-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKYLAKE-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaskmovd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKX-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaskmovd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [100:?]
; ZNVER1-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
@@ -3430,42 +3430,42 @@ declare void @llvm.x86.avx2.maskstore.d(
define <8 x i32> @test_pmaskmovd_ymm(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) {
; GENERIC-LABEL: test_pmaskmovd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2
; GENERIC-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi)
; GENERIC-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaskmovd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [8:2.00]
; BROADWELL-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKYLAKE-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaskmovd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKX-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaskmovd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [100:?]
; ZNVER1-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %ymm2, %ymm0 # sched: [2:0.25]
@@ -3479,42 +3479,42 @@ declare void @llvm.x86.avx2.maskstore.d.
define <2 x i64> @test_pmaskmovq(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) {
; GENERIC-LABEL: test_pmaskmovq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2
; GENERIC-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi)
; GENERIC-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaskmovq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [7:2.00]
; BROADWELL-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKYLAKE-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaskmovq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [7:0.50]
; SKX-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaskmovq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
; ZNVER1-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
@@ -3528,42 +3528,42 @@ declare void @llvm.x86.avx2.maskstore.q(
define <4 x i64> @test_pmaskmovq_ymm(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) {
; GENERIC-LABEL: test_pmaskmovq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2
; GENERIC-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi)
; GENERIC-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaskmovq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [8:2.00]
; BROADWELL-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
; BROADWELL-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKYLAKE-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKYLAKE-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaskmovq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [8:0.50]
; SKX-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [2:1.00]
; SKX-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaskmovq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [9:1.50]
; ZNVER1-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %ymm2, %ymm0 # sched: [2:0.25]
@@ -3577,37 +3577,37 @@ declare void @llvm.x86.avx2.maskstore.q.
define <32 x i8> @test_pmaxsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pmaxsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3620,37 +3620,37 @@ declare <32 x i8> @llvm.x86.avx2.pmaxs.b
define <8 x i32> @test_pmaxsd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmaxsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3663,37 +3663,37 @@ declare <8 x i32> @llvm.x86.avx2.pmaxs.d
define <16 x i16> @test_pmaxsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmaxsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3706,37 +3706,37 @@ declare <16 x i16> @llvm.x86.avx2.pmaxs.
define <32 x i8> @test_pmaxub(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pmaxub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxub:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxub:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxub:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxub:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3749,37 +3749,37 @@ declare <32 x i8> @llvm.x86.avx2.pmaxu.b
define <8 x i32> @test_pmaxud(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmaxud:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxud:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxud:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxud:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxud:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxud:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3792,37 +3792,37 @@ declare <8 x i32> @llvm.x86.avx2.pmaxu.d
define <16 x i16> @test_pmaxuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmaxuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmaxuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmaxuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmaxuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmaxuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3835,37 +3835,37 @@ declare <16 x i16> @llvm.x86.avx2.pmaxu.
define <32 x i8> @test_pminsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pminsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3878,37 +3878,37 @@ declare <32 x i8> @llvm.x86.avx2.pmins.b
define <8 x i32> @test_pminsd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pminsd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminsd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminsd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminsd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminsd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3921,37 +3921,37 @@ declare <8 x i32> @llvm.x86.avx2.pmins.d
define <16 x i16> @test_pminsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pminsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -3964,37 +3964,37 @@ declare <16 x i16> @llvm.x86.avx2.pmins.
define <32 x i8> @test_pminub(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pminub:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminub:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminub:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminub:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminub:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminub:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4007,37 +4007,37 @@ declare <32 x i8> @llvm.x86.avx2.pminu.b
define <8 x i32> @test_pminud(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pminud:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminud:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminud:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminud:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminud:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminud:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4050,37 +4050,37 @@ declare <8 x i32> @llvm.x86.avx2.pminu.d
define <16 x i16> @test_pminuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pminuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pminuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pminuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pminuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pminuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pminuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4093,37 +4093,37 @@ declare <16 x i16> @llvm.x86.avx2.pminu.
define i32 @test_pmovmskb(<32 x i8> %a0) {
; GENERIC-LABEL: test_pmovmskb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovmskb %ymm0, %eax # sched: [1:1.00]
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovmskb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovmskb %ymm0, %eax # sched: [3:1.00]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovmskb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovmskb %ymm0, %eax # sched: [3:1.00]
; BROADWELL-NEXT: vzeroupper # sched: [4:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovmskb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovmskb %ymm0, %eax # sched: [2:1.00]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovmskb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovmskb %ymm0, %eax # sched: [2:1.00]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovmskb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovmskb %ymm0, %eax # sched: [2:1.00]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4134,42 +4134,42 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32
define <8 x i32> @test_pmovsxbd(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovsxbd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxbd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxbd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxbd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [8:1.00]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxbd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4185,42 +4185,42 @@ define <8 x i32> @test_pmovsxbd(<16 x i8
define <4 x i64> @test_pmovsxbq(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovsxbq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxbq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxbq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxbq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [8:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxbq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4236,42 +4236,42 @@ define <4 x i64> @test_pmovsxbq(<16 x i8
define <16 x i16> @test_pmovsxbw(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovsxbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [9:1.00]
; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4285,42 +4285,42 @@ define <16 x i16> @test_pmovsxbw(<16 x i
define <4 x i64> @test_pmovsxdq(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_pmovsxdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [9:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4334,42 +4334,42 @@ define <4 x i64> @test_pmovsxdq(<4 x i32
define <8 x i32> @test_pmovsxwd(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pmovsxwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [9:1.00]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [9:1.00]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4383,42 +4383,42 @@ define <8 x i32> @test_pmovsxwd(<8 x i16
define <4 x i64> @test_pmovsxwq(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pmovsxwq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovsxwq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovsxwq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00]
; BROADWELL-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [8:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovsxwq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [8:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovsxwq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [8:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovsxwq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4434,42 +4434,42 @@ define <4 x i64> @test_pmovsxwq(<8 x i16
define <8 x i32> @test_pmovzxbd(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovzxbd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxbd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [9:1.00]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxbd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [10:1.00]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxbd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [10:1.00]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxbd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4485,42 +4485,42 @@ define <8 x i32> @test_pmovzxbd(<16 x i8
define <4 x i64> @test_pmovzxbq(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovzxbq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxbq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [9:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxbq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [10:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxbq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
; SKX-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [10:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxbq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [1:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4536,42 +4536,42 @@ define <4 x i64> @test_pmovzxbq(<16 x i8
define <16 x i16> @test_pmovzxbw(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pmovzxbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [5:1.00]
; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [3:1.00]
; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [9:1.00]
; BROADWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [10:1.00]
; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [10:1.00]
; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:0.50]
; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4585,42 +4585,42 @@ define <16 x i16> @test_pmovzxbw(<16 x i
define <4 x i64> @test_pmovzxdq(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_pmovzxdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [9:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [10:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [10:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4634,42 +4634,42 @@ define <4 x i64> @test_pmovzxdq(<4 x i32
define <8 x i32> @test_pmovzxwd(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pmovzxwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [3:1.00]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:1.00]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [9:1.00]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [9:1.00]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4683,42 +4683,42 @@ define <8 x i32> @test_pmovzxwd(<8 x i16
define <4 x i64> @test_pmovzxwq(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pmovzxwq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmovzxwq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [3:1.00]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmovzxwq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
; BROADWELL-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [9:1.00]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmovzxwq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
; SKYLAKE-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [10:1.00]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmovzxwq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
; SKX-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [10:1.00]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmovzxwq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [8:0.50]
; ZNVER1-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.25]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -4734,37 +4734,37 @@ define <4 x i64> @test_pmovzxwq(<8 x i16
define <4 x i64> @test_pmuldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmuldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmuldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmuldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmuldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmuldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmuldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4778,37 +4778,37 @@ declare <4 x i64> @llvm.x86.avx2.pmul.dq
define <16 x i16> @test_pmulhrsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmulhrsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhrsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhrsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhrsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhrsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmulhrsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4821,37 +4821,37 @@ declare <16 x i16> @llvm.x86.avx2.pmul.h
define <16 x i16> @test_pmulhuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmulhuw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhuw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhuw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhuw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhuw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmulhuw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4864,37 +4864,37 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu
define <16 x i16> @test_pmulhw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmulhw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulhw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulhw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulhw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulhw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmulhw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4907,37 +4907,37 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.
define <8 x i32> @test_pmulld(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmulld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmulld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [10:2.00]
; HASWELL-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmulld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [10:2.00]
; BROADWELL-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [16:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmulld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [8:0.67]
; SKYLAKE-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [15:0.67]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmulld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [8:0.67]
; SKX-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [15:0.67]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmulld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4949,37 +4949,37 @@ define <8 x i32> @test_pmulld(<8 x i32>
define <16 x i16> @test_pmullw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmullw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmullw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmullw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmullw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmullw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmullw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -4991,37 +4991,37 @@ define <16 x i16> @test_pmullw(<16 x i16
define <4 x i64> @test_pmuludq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmuludq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmuludq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pmuludq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pmuludq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKYLAKE-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pmuludq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [4:0.33]
; SKX-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pmuludq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5035,42 +5035,42 @@ declare <4 x i64> @llvm.x86.avx2.pmulu.d
define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_por:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_por:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_por:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_por:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_por:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_por:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5084,37 +5084,37 @@ define <4 x i64> @test_por(<4 x i64> %a0
define <4 x i64> @test_psadbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psadbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psadbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psadbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; BROADWELL-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psadbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psadbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKX-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psadbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5128,37 +5128,37 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw
define <32 x i8> @test_pshufb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pshufb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKX-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pshufb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5171,42 +5171,42 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b
define <8 x i32> @test_pshufd(<8 x i32> %a0, <8 x i32> *%a1) {
; GENERIC-LABEL: test_pshufd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; GENERIC-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [5:1.00]
; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; HASWELL-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [1:1.00]
; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; BROADWELL-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [7:1.00]
; BROADWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; SKYLAKE-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [8:1.00]
; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
; SKX-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [8:1.00]
; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pshufd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [8:0.50]
; ZNVER1-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.25]
; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5220,42 +5220,42 @@ define <8 x i32> @test_pshufd(<8 x i32>
define <16 x i16> @test_pshufhw(<16 x i16> %a0, <16 x i16> *%a1) {
; GENERIC-LABEL: test_pshufhw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
; GENERIC-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [5:1.00]
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshufhw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
; HASWELL-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [1:1.00]
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshufhw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
; BROADWELL-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [7:1.00]
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshufhw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
; SKYLAKE-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [8:1.00]
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshufhw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
; SKX-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [8:1.00]
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pshufhw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [8:0.50]
; ZNVER1-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5269,42 +5269,42 @@ define <16 x i16> @test_pshufhw(<16 x i1
define <16 x i16> @test_pshuflw(<16 x i16> %a0, <16 x i16> *%a1) {
; GENERIC-LABEL: test_pshuflw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
; GENERIC-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [5:1.00]
; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pshuflw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
; HASWELL-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [1:1.00]
; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pshuflw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
; BROADWELL-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [7:1.00]
; BROADWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pshuflw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
; SKYLAKE-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [8:1.00]
; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pshuflw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
; SKX-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [8:1.00]
; SKX-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pshuflw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [8:0.50]
; ZNVER1-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:0.25]
; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5318,37 +5318,37 @@ define <16 x i16> @test_pshuflw(<16 x i1
define <32 x i8> @test_psignb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psignb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psignb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5361,37 +5361,37 @@ declare <32 x i8> @llvm.x86.avx2.psign.b
define <8 x i32> @test_psignd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psignd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psignd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5404,37 +5404,37 @@ declare <8 x i32> @llvm.x86.avx2.psign.d
define <16 x i16> @test_psignw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_psignw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psignw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psignw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psignw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psignw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psignw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5447,42 +5447,42 @@ declare <16 x i16> @llvm.x86.avx2.psign.
define <8 x i32> @test_pslld(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pslld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pslld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pslld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pslld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pslld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pslld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5497,32 +5497,32 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(
define <32 x i8> @test_pslldq(<32 x i8> %a0) {
; GENERIC-LABEL: test_pslldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pslldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pslldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pslldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pslldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pslldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [2:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a0, <32 x i32> <i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60>
@@ -5531,42 +5531,42 @@ define <32 x i8> @test_pslldq(<32 x i8>
define <4 x i64> @test_psllq(<4 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psllq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5581,37 +5581,37 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(
define <4 x i32> @test_psllvd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psllvd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllvd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllvd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllvd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllvd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5624,37 +5624,37 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d
define <8 x i32> @test_psllvd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psllvd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllvd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllvd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllvd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllvd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5667,37 +5667,37 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d
define <2 x i64> @test_psllvq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psllvq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllvq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllvq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllvq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllvq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5710,37 +5710,37 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q
define <4 x i64> @test_psllvq_ymm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_psllvq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllvq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllvq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllvq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllvq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5753,42 +5753,42 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q
define <16 x i16> @test_psllw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psllw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psllw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psllw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psllw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psllw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5803,42 +5803,42 @@ declare <16 x i16> @llvm.x86.avx2.psll.w
define <8 x i32> @test_psrad(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrad:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrad:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrad:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrad:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrad:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrad:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5853,37 +5853,37 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(
define <4 x i32> @test_psravd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psravd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psravd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psravd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psravd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psravd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psravd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5896,37 +5896,37 @@ declare <4 x i32> @llvm.x86.avx2.psrav.d
define <8 x i32> @test_psravd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psravd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psravd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psravd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psravd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psravd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psravd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -5939,42 +5939,42 @@ declare <8 x i32> @llvm.x86.avx2.psrav.d
define <16 x i16> @test_psraw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psraw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psraw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psraw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psraw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psraw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psraw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -5989,42 +5989,42 @@ declare <16 x i16> @llvm.x86.avx2.psra.w
define <8 x i32> @test_psrld(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrld:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrld:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrld:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrld:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrld:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrld:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -6039,32 +6039,32 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(
define <32 x i8> @test_psrldq(<32 x i8> %a0) {
; GENERIC-LABEL: test_psrldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [2:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = shufflevector <32 x i8> %a0, <32 x i8> zeroinitializer, <32 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50>
@@ -6073,42 +6073,42 @@ define <32 x i8> @test_psrldq(<32 x i8>
define <4 x i64> @test_psrlq(<4 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psrlq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -6123,37 +6123,37 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(
define <4 x i32> @test_psrlvd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrlvd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlvd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [8:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlvd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlvd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlvd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6166,37 +6166,37 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d
define <8 x i32> @test_psrlvd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psrlvd_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvd_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlvd_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; BROADWELL-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [9:2.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlvd_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlvd_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlvd_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6209,37 +6209,37 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d
define <2 x i64> @test_psrlvq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psrlvq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlvq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlvq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlvq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [7:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlvq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6252,37 +6252,37 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q
define <4 x i64> @test_psrlvq_ymm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_psrlvq_ymm:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvq_ymm:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlvq_ymm:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlvq_ymm:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlvq_ymm:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlvq_ymm:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6295,42 +6295,42 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q
define <16 x i16> @test_psrlw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psrlw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psrlw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; BROADWELL-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; BROADWELL-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psrlw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psrlw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKX-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psrlw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:0.25]
@@ -6345,37 +6345,37 @@ declare <16 x i16> @llvm.x86.avx2.psrl.w
define <32 x i8> @test_psubb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psubb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6387,37 +6387,37 @@ define <32 x i8> @test_psubb(<32 x i8> %
define <8 x i32> @test_psubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psubd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6429,37 +6429,37 @@ define <8 x i32> @test_psubd(<8 x i32> %
define <4 x i64> @test_psubq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_psubq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6471,37 +6471,37 @@ define <4 x i64> @test_psubq(<4 x i64> %
define <32 x i8> @test_psubsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psubsb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubsb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubsb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubsb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubsb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6514,37 +6514,37 @@ declare <32 x i8> @llvm.x86.avx2.psubs.b
define <16 x i16> @test_psubsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_psubsw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubsw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubsw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubsw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubsw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6557,37 +6557,37 @@ declare <16 x i16> @llvm.x86.avx2.psubs.
define <32 x i8> @test_psubusb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psubusb:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubusb:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubusb:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubusb:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubusb:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubusb:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6600,37 +6600,37 @@ declare <32 x i8> @llvm.x86.avx2.psubus.
define <16 x i16> @test_psubusw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_psubusw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubusw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubusw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubusw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubusw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKX-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubusw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6643,37 +6643,37 @@ declare <16 x i16> @llvm.x86.avx2.psubus
define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_psubw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_psubw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_psubw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_psubw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_psubw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6685,37 +6685,37 @@ define <16 x i16> @test_psubw(<16 x i16>
define <32 x i8> @test_punpckhbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_punpckhbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
; GENERIC-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
; SKX-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpckhbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6727,7 +6727,7 @@ define <32 x i8> @test_punpckhbw(<32 x i
define <8 x i32> @test_punpckhdq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_punpckhdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; GENERIC-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [5:1.00]
; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [3:1.00]
@@ -6735,7 +6735,7 @@ define <8 x i32> @test_punpckhdq(<8 x i3
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [1:1.00]
; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6743,7 +6743,7 @@ define <8 x i32> @test_punpckhdq(<8 x i3
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [7:1.00]
; BROADWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6751,7 +6751,7 @@ define <8 x i32> @test_punpckhdq(<8 x i3
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6759,7 +6759,7 @@ define <8 x i32> @test_punpckhdq(<8 x i3
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
; SKX-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:1.00]
; SKX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6767,7 +6767,7 @@ define <8 x i32> @test_punpckhdq(<8 x i3
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpckhdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50]
; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.25]
@@ -6782,42 +6782,42 @@ define <8 x i32> @test_punpckhdq(<8 x i3
define <4 x i64> @test_punpckhqdq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_punpckhqdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; GENERIC-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhqdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [1:1.00]
; HASWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhqdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [7:1.00]
; BROADWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhqdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
; SKYLAKE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhqdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
; SKX-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:1.00]
; SKX-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpckhqdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -6831,37 +6831,37 @@ define <4 x i64> @test_punpckhqdq(<4 x i
define <16 x i16> @test_punpckhwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_punpckhwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
; GENERIC-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckhwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
; HASWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckhwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckhwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckhwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
; SKX-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpckhwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6873,37 +6873,37 @@ define <16 x i16> @test_punpckhwd(<16 x
define <32 x i8> @test_punpcklbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_punpcklbw:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
; GENERIC-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklbw:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
; HASWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklbw:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
; BROADWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklbw:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklbw:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
; SKX-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpcklbw:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -6915,7 +6915,7 @@ define <32 x i8> @test_punpcklbw(<32 x i
define <8 x i32> @test_punpckldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_punpckldq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; GENERIC-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [5:1.00]
; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [3:1.00]
@@ -6923,7 +6923,7 @@ define <8 x i32> @test_punpckldq(<8 x i3
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpckldq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; HASWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [1:1.00]
; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6931,7 +6931,7 @@ define <8 x i32> @test_punpckldq(<8 x i3
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpckldq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; BROADWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [7:1.00]
; BROADWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6939,7 +6939,7 @@ define <8 x i32> @test_punpckldq(<8 x i3
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpckldq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6947,7 +6947,7 @@ define <8 x i32> @test_punpckldq(<8 x i3
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpckldq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
; SKX-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:1.00]
; SKX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
@@ -6955,7 +6955,7 @@ define <8 x i32> @test_punpckldq(<8 x i3
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpckldq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.25]
; ZNVER1-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50]
; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.25]
@@ -6970,42 +6970,42 @@ define <8 x i32> @test_punpckldq(<8 x i3
define <4 x i64> @test_punpcklqdq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_punpcklqdq:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; GENERIC-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklqdq:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; HASWELL-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [1:1.00]
; HASWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklqdq:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; BROADWELL-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [7:1.00]
; BROADWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklqdq:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
; SKYLAKE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklqdq:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
; SKX-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:1.00]
; SKX-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpcklqdq:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
@@ -7019,37 +7019,37 @@ define <4 x i64> @test_punpcklqdq(<4 x i
define <16 x i16> @test_punpcklwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_punpcklwd:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
; GENERIC-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_punpcklwd:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
; HASWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_punpcklwd:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
; BROADWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [7:1.00]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_punpcklwd:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
; SKYLAKE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [8:1.00]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_punpcklwd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
; SKX-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [8:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_punpcklwd:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:0.25]
; ZNVER1-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
@@ -7061,42 +7061,42 @@ define <16 x i16> @test_punpcklwd(<16 x
define <4 x i64> @test_pxor(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pxor:
-; GENERIC: # BB#0:
+; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pxor:
-; HASWELL: # BB#0:
+; HASWELL: # %bb.0:
; HASWELL-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pxor:
-; BROADWELL: # BB#0:
+; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; BROADWELL-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [7:0.50]
; BROADWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pxor:
-; SKYLAKE: # BB#0:
+; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pxor:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
;
; ZNVER1-LABEL: test_pxor:
-; ZNVER1: # BB#0:
+; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
Modified: llvm/trunk/test/CodeGen/X86/avx2-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-shift.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-shift.ll Mon Dec 4 09:18:51 2017
@@ -4,12 +4,12 @@
define <4 x i32> @variable_shl0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_shl0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = shl <4 x i32> %x, %y
@@ -18,12 +18,12 @@ define <4 x i32> @variable_shl0(<4 x i32
define <8 x i32> @variable_shl1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_shl1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = shl <8 x i32> %x, %y
@@ -32,12 +32,12 @@ define <8 x i32> @variable_shl1(<8 x i32
define <2 x i64> @variable_shl2(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: variable_shl2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = shl <2 x i64> %x, %y
@@ -46,12 +46,12 @@ define <2 x i64> @variable_shl2(<2 x i64
define <4 x i64> @variable_shl3(<4 x i64> %x, <4 x i64> %y) {
; X32-LABEL: variable_shl3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = shl <4 x i64> %x, %y
@@ -60,12 +60,12 @@ define <4 x i64> @variable_shl3(<4 x i64
define <4 x i32> @variable_srl0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_srl0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = lshr <4 x i32> %x, %y
@@ -74,12 +74,12 @@ define <4 x i32> @variable_srl0(<4 x i32
define <8 x i32> @variable_srl1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_srl1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = lshr <8 x i32> %x, %y
@@ -88,12 +88,12 @@ define <8 x i32> @variable_srl1(<8 x i32
define <2 x i64> @variable_srl2(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: variable_srl2:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl2:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = lshr <2 x i64> %x, %y
@@ -102,12 +102,12 @@ define <2 x i64> @variable_srl2(<2 x i64
define <4 x i64> @variable_srl3(<4 x i64> %x, <4 x i64> %y) {
; X32-LABEL: variable_srl3:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl3:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = lshr <4 x i64> %x, %y
@@ -116,12 +116,12 @@ define <4 x i64> @variable_srl3(<4 x i64
define <4 x i32> @variable_sra0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_sra0:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra0:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = ashr <4 x i32> %x, %y
@@ -130,12 +130,12 @@ define <4 x i32> @variable_sra0(<4 x i32
define <8 x i32> @variable_sra1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_sra1:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra1:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = ashr <8 x i32> %x, %y
@@ -146,12 +146,12 @@ define <8 x i32> @variable_sra1(<8 x i32
define <8 x i32> @vshift00(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift00:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpslld $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift00:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpslld $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -160,12 +160,12 @@ define <8 x i32> @vshift00(<8 x i32> %a)
define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift01:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift01:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -174,12 +174,12 @@ define <16 x i16> @vshift01(<16 x i16> %
define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone {
; X32-LABEL: vshift02:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift02:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
@@ -190,12 +190,12 @@ define <4 x i64> @vshift02(<4 x i64> %a)
define <8 x i32> @vshift03(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift03:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrld $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift03:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrld $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -204,12 +204,12 @@ define <8 x i32> @vshift03(<8 x i32> %a)
define <16 x i16> @vshift04(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift04:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift04:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -218,12 +218,12 @@ define <16 x i16> @vshift04(<16 x i16> %
define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone {
; X32-LABEL: vshift05:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift05:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlq $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
@@ -234,12 +234,12 @@ define <4 x i64> @vshift05(<4 x i64> %a)
define <8 x i32> @vshift06(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift06:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrad $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift06:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrad $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = ashr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -248,12 +248,12 @@ define <8 x i32> @vshift06(<8 x i32> %a)
define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift07:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsraw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift07:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsraw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = ashr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -262,13 +262,13 @@ define <16 x i16> @vshift07(<16 x i16> %
define <4 x i32> @variable_sra0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_sra0_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsravd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra0_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsravd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -278,13 +278,13 @@ define <4 x i32> @variable_sra0_load(<4
define <8 x i32> @variable_sra1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_sra1_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsravd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra1_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsravd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -294,13 +294,13 @@ define <8 x i32> @variable_sra1_load(<8
define <4 x i32> @variable_shl0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_shl0_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl0_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -310,13 +310,13 @@ define <4 x i32> @variable_shl0_load(<4
define <8 x i32> @variable_shl1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_shl1_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl1_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -326,13 +326,13 @@ define <8 x i32> @variable_shl1_load(<8
define <2 x i64> @variable_shl2_load(<2 x i64> %x, <2 x i64>* %y) {
; X32-LABEL: variable_shl2_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl2_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <2 x i64>, <2 x i64>* %y
@@ -342,13 +342,13 @@ define <2 x i64> @variable_shl2_load(<2
define <4 x i64> @variable_shl3_load(<4 x i64> %x, <4 x i64>* %y) {
; X32-LABEL: variable_shl3_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvq (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl3_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllvq (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <4 x i64>, <4 x i64>* %y
@@ -358,13 +358,13 @@ define <4 x i64> @variable_shl3_load(<4
define <4 x i32> @variable_srl0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_srl0_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl0_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -374,13 +374,13 @@ define <4 x i32> @variable_srl0_load(<4
define <8 x i32> @variable_srl1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_srl1_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl1_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -390,13 +390,13 @@ define <8 x i32> @variable_srl1_load(<8
define <2 x i64> @variable_srl2_load(<2 x i64> %x, <2 x i64>* %y) {
; X32-LABEL: variable_srl2_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl2_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <2 x i64>, <2 x i64>* %y
@@ -406,13 +406,13 @@ define <2 x i64> @variable_srl2_load(<2
define <4 x i64> @variable_srl3_load(<4 x i64> %x, <4 x i64>* %y) {
; X32-LABEL: variable_srl3_load:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvq (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl3_load:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <4 x i64>, <4 x i64>* %y
@@ -422,13 +422,13 @@ define <4 x i64> @variable_srl3_load(<4
define <32 x i8> @shl9(<32 x i8> %A) nounwind {
; X32-LABEL: shl9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $3, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: shl9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -438,13 +438,13 @@ define <32 x i8> @shl9(<32 x i8> %A) nou
define <32 x i8> @shr9(<32 x i8> %A) nounwind {
; X32-LABEL: shr9:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: shr9:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -454,13 +454,13 @@ define <32 x i8> @shr9(<32 x i8> %A) nou
define <32 x i8> @sra_v32i8_7(<32 x i8> %A) nounwind {
; X32-LABEL: sra_v32i8_7:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sra_v32i8_7:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -470,7 +470,7 @@ define <32 x i8> @sra_v32i8_7(<32 x i8>
define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
; X32-LABEL: sra_v32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -479,7 +479,7 @@ define <32 x i8> @sra_v32i8(<32 x i8> %A
; X32-NEXT: retl
;
; X64-LABEL: sra_v32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsrlw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -492,13 +492,13 @@ define <32 x i8> @sra_v32i8(<32 x i8> %A
define <16 x i16> @sext_v16i16(<16 x i16> %a) nounwind {
; X32-LABEL: sext_v16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $8, %ymm0, %ymm0
; X32-NEXT: vpsraw $8, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_v16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $8, %ymm0, %ymm0
; X64-NEXT: vpsraw $8, %ymm0, %ymm0
; X64-NEXT: retq
@@ -509,13 +509,13 @@ define <16 x i16> @sext_v16i16(<16 x i16
define <8 x i32> @sext_v8i32(<8 x i32> %a) nounwind {
; X32-LABEL: sext_v8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpslld $16, %ymm0, %ymm0
; X32-NEXT: vpsrad $16, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_v8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpslld $16, %ymm0, %ymm0
; X64-NEXT: vpsrad $16, %ymm0, %ymm0
; X64-NEXT: retq
@@ -526,7 +526,7 @@ define <8 x i32> @sext_v8i32(<8 x i32> %
define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_shl16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -537,7 +537,7 @@ define <8 x i16> @variable_shl16(<8 x i1
; X32-NEXT: retl
;
; X64-LABEL: variable_shl16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -552,7 +552,7 @@ define <8 x i16> @variable_shl16(<8 x i1
define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_ashr16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -562,7 +562,7 @@ define <8 x i16> @variable_ashr16(<8 x i
; X32-NEXT: retl
;
; X64-LABEL: variable_ashr16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -576,7 +576,7 @@ define <8 x i16> @variable_ashr16(<8 x i
define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_lshr16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
@@ -587,7 +587,7 @@ define <8 x i16> @variable_lshr16(<8 x i
; X32-NEXT: retl
;
; X64-LABEL: variable_lshr16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
Modified: llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll Mon Dec 4 09:18:51 2017
@@ -6,13 +6,13 @@
define <16 x i8> @BB16(i8* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: BB16:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastb (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: BB16:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastb (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -38,13 +38,13 @@ entry:
define <32 x i8> @BB32(i8* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: BB32:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastb (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: BB32:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastb (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -87,13 +87,13 @@ entry:
define <8 x i16> @W16(i16* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: W16:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastw (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: W16:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastw (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -111,13 +111,13 @@ entry:
define <16 x i16> @WW16(i16* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: WW16:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastw (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: WW16:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastw (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -143,13 +143,13 @@ entry:
define <4 x i32> @D32(i32* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: D32:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: D32:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -163,13 +163,13 @@ entry:
define <8 x i32> @DD32(i32* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: DD32:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: DD32:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -187,7 +187,7 @@ entry:
define <2 x i64> @Q64(i64* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: Q64:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl (%eax), %ecx
; X32-NEXT: movl 4(%eax), %eax
@@ -198,7 +198,7 @@ define <2 x i64> @Q64(i64* %ptr) nounwin
; X32-NEXT: retl
;
; X64-LABEL: Q64:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastq (%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -210,7 +210,7 @@ entry:
define <4 x i64> @QQ64(i64* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: QQ64:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl (%eax), %ecx
; X32-NEXT: movl 4(%eax), %eax
@@ -222,7 +222,7 @@ define <4 x i64> @QQ64(i64* %ptr) nounwi
; X32-NEXT: retl
;
; X64-LABEL: QQ64:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -236,13 +236,13 @@ entry:
define <8 x i16> @broadcast_mem_v4i16_v8i16(<4 x i16>* %ptr) {
; X32-LABEL: broadcast_mem_v4i16_v8i16:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: broadcast_mem_v4i16_v8i16:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastq (%rdi), %xmm0
; X64-NEXT: retq
%load = load <4 x i16>, <4 x i16>* %ptr
@@ -252,14 +252,14 @@ define <8 x i16> @broadcast_mem_v4i16_v8
define <16 x i16> @broadcast_mem_v4i16_v16i16(<4 x i16>* %ptr) {
; X32-LABEL: broadcast_mem_v4i16_v16i16:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: broadcast_mem_v4i16_v16i16:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd (%rdi), %ymm0
; X64-NEXT: retq
%load = load <4 x i16>, <4 x i16>* %ptr
@@ -271,13 +271,13 @@ define <16 x i16> @broadcast_mem_v4i16_v
define <16 x i8> @load_splat_16i8_16i8_1111111111111111(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_16i8_16i8_1111111111111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastb 1(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_16i8_16i8_1111111111111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastb 1(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -288,13 +288,13 @@ entry:
define <32 x i8> @load_splat_32i8_16i8_11111111111111111111111111111111(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_32i8_16i8_11111111111111111111111111111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastb 1(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_32i8_16i8_11111111111111111111111111111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastb 1(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -305,13 +305,13 @@ entry:
define <32 x i8> @load_splat_32i8_32i8_11111111111111111111111111111111(<32 x i8>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_32i8_32i8_11111111111111111111111111111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastb 1(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_32i8_32i8_11111111111111111111111111111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastb 1(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -322,13 +322,13 @@ entry:
define <8 x i16> @load_splat_8i16_8i16_11111111(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8i16_8i16_11111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastw 2(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8i16_8i16_11111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastw 2(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -339,13 +339,13 @@ entry:
define <16 x i16> @load_splat_16i16_8i16_1111111111111111(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_16i16_8i16_1111111111111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastw 2(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_16i16_8i16_1111111111111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastw 2(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -356,13 +356,13 @@ entry:
define <16 x i16> @load_splat_16i16_16i16_1111111111111111(<16 x i16>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_16i16_16i16_1111111111111111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpbroadcastw 2(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_16i16_16i16_1111111111111111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastw 2(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -373,13 +373,13 @@ entry:
define <4 x i32> @load_splat_4i32_4i32_1111(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4i32_4i32_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 4(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4i32_4i32_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 4(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -390,13 +390,13 @@ entry:
define <8 x i32> @load_splat_8i32_4i32_33333333(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8i32_4i32_33333333:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 12(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8i32_4i32_33333333:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 12(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -407,13 +407,13 @@ entry:
define <8 x i32> @load_splat_8i32_8i32_55555555(<8 x i32>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8i32_8i32_55555555:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 20(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8i32_8i32_55555555:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 20(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -424,13 +424,13 @@ entry:
define <4 x float> @load_splat_4f32_4f32_1111(<4 x float>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4f32_4f32_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 4(%eax), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4f32_4f32_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 4(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -441,13 +441,13 @@ entry:
define <8 x float> @load_splat_8f32_4f32_33333333(<4 x float>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8f32_4f32_33333333:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 12(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8f32_4f32_33333333:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 12(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -458,13 +458,13 @@ entry:
define <8 x float> @load_splat_8f32_8f32_55555555(<8 x float>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_8f32_8f32_55555555:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastss 20(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_8f32_8f32_55555555:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastss 20(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -475,13 +475,13 @@ entry:
define <2 x i64> @load_splat_2i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_2i64_2i64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: load_splat_2i64_2i64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vpbroadcastq 8(%rdi), %xmm0
; X64-NEXT: retq
entry:
@@ -492,13 +492,13 @@ entry:
define <4 x i64> @load_splat_4i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4i64_2i64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 8(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4i64_2i64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 8(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -509,13 +509,13 @@ entry:
define <4 x i64> @load_splat_4i64_4i64_2222(<4 x i64>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4i64_4i64_2222:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 16(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4i64_4i64_2222:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 16(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -526,13 +526,13 @@ entry:
define <2 x double> @load_splat_2f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_2f64_2f64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: load_splat_2f64_2f64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
@@ -543,13 +543,13 @@ entry:
define <4 x double> @load_splat_4f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4f64_2f64_1111:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 8(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4f64_2f64_1111:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 8(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -560,13 +560,13 @@ entry:
define <4 x double> @load_splat_4f64_4f64_2222(<4 x double>* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: load_splat_4f64_4f64_2222:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastsd 16(%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_splat_4f64_4f64_2222:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vbroadcastsd 16(%rdi), %ymm0
; X64-NEXT: retq
entry:
@@ -579,13 +579,13 @@ entry:
; this used to crash
define <2 x double> @I(double* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: I:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-NEXT: retl
;
; X64-LABEL: I:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
@@ -597,24 +597,24 @@ entry:
define <8 x i32> @V111(<8 x i32> %in) nounwind uwtable readnone ssp {
; X32-AVX2-LABEL: V111:
-; X32-AVX2: ## BB#0: ## %entry
+; X32-AVX2: ## %bb.0: ## %entry
; X32-AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2]
; X32-AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
;
; X64-AVX2-LABEL: V111:
-; X64-AVX2: ## BB#0: ## %entry
+; X64-AVX2: ## %bb.0: ## %entry
; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2]
; X64-AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: retq
;
; X32-AVX512VL-LABEL: V111:
-; X32-AVX512VL: ## BB#0: ## %entry
+; X32-AVX512VL: ## %bb.0: ## %entry
; X32-AVX512VL-NEXT: vpaddd LCPI29_0{1to8}, %ymm0, %ymm0
; X32-AVX512VL-NEXT: retl
;
; X64-AVX512VL-LABEL: V111:
-; X64-AVX512VL: ## BB#0: ## %entry
+; X64-AVX512VL: ## %bb.0: ## %entry
; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
entry:
@@ -624,24 +624,24 @@ entry:
define <8 x float> @V113(<8 x float> %in) nounwind uwtable readnone ssp {
; X32-AVX2-LABEL: V113:
-; X32-AVX2: ## BB#0: ## %entry
+; X32-AVX2: ## %bb.0: ## %entry
; X32-AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X32-AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
;
; X64-AVX2-LABEL: V113:
-; X64-AVX2: ## BB#0: ## %entry
+; X64-AVX2: ## %bb.0: ## %entry
; X64-AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X64-AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: retq
;
; X32-AVX512VL-LABEL: V113:
-; X32-AVX512VL: ## BB#0: ## %entry
+; X32-AVX512VL: ## %bb.0: ## %entry
; X32-AVX512VL-NEXT: vaddps LCPI30_0{1to8}, %ymm0, %ymm0
; X32-AVX512VL-NEXT: retl
;
; X64-AVX512VL-LABEL: V113:
-; X64-AVX512VL: ## BB#0: ## %entry
+; X64-AVX512VL: ## %bb.0: ## %entry
; X64-AVX512VL-NEXT: vaddps {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
entry:
@@ -651,12 +651,12 @@ entry:
define <4 x float> @_e2(float* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: _e2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{.*#+}} xmm0 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X32-NEXT: retl
;
; X64-LABEL: _e2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss {{.*#+}} xmm0 = [-0.0078125,-0.0078125,-0.0078125,-0.0078125]
; X64-NEXT: retq
%vecinit.i = insertelement <4 x float> undef, float 0xbf80000000000000, i32 0
@@ -668,12 +668,12 @@ define <4 x float> @_e2(float* %ptr) nou
define <8 x i8> @_e4(i8* %ptr) nounwind uwtable readnone ssp {
; X32-LABEL: _e4:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vmovaps {{.*#+}} xmm0 = [52,52,52,52,52,52,52,52]
; X32-NEXT: retl
;
; X64-LABEL: _e4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovaps {{.*#+}} xmm0 = [52,52,52,52,52,52,52,52]
; X64-NEXT: retq
%vecinit0.i = insertelement <8 x i8> undef, i8 52, i32 0
@@ -689,11 +689,11 @@ define <8 x i8> @_e4(i8* %ptr) nounwind
define void @crash() nounwind alwaysinline {
; X32-LABEL: crash:
-; X32: ## BB#0: ## %WGLoopsEntry
+; X32: ## %bb.0: ## %WGLoopsEntry
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: testb %al, %al
; X32-NEXT: je LBB33_1
-; X32-NEXT: ## BB#2: ## %ret
+; X32-NEXT: ## %bb.2: ## %ret
; X32-NEXT: retl
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: LBB33_1: ## %footer349VF
@@ -701,11 +701,11 @@ define void @crash() nounwind alwaysinli
; X32-NEXT: jmp LBB33_1
;
; X64-LABEL: crash:
-; X64: ## BB#0: ## %WGLoopsEntry
+; X64: ## %bb.0: ## %WGLoopsEntry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: testb %al, %al
; X64-NEXT: je LBB33_1
-; X64-NEXT: ## BB#2: ## %ret
+; X64-NEXT: ## %bb.2: ## %ret
; X64-NEXT: retq
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: LBB33_1: ## %footer349VF
@@ -739,18 +739,18 @@ ret:
define <8 x i32> @_inreg0(i32 %scalar) nounwind uwtable readnone ssp {
; X32-LABEL: _inreg0:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-AVX2-LABEL: _inreg0:
-; X64-AVX2: ## BB#0:
+; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vmovd %edi, %xmm0
; X64-AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
; X64-AVX2-NEXT: retq
;
; X64-AVX512VL-LABEL: _inreg0:
-; X64-AVX512VL: ## BB#0:
+; X64-AVX512VL: ## %bb.0:
; X64-AVX512VL-NEXT: vpbroadcastd %edi, %ymm0
; X64-AVX512VL-NEXT: retq
%in = insertelement <8 x i32> undef, i32 %scalar, i32 0
@@ -760,12 +760,12 @@ define <8 x i32> @_inreg0(i32 %scalar) n
define <8 x float> @_inreg1(float %scalar) nounwind uwtable readnone ssp {
; X32-LABEL: _inreg1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%in = insertelement <8 x float> undef, float %scalar, i32 0
@@ -775,12 +775,12 @@ define <8 x float> @_inreg1(float %scala
define <4 x float> @_inreg2(float %scalar) nounwind uwtable readnone ssp {
; X32-LABEL: _inreg2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %xmm0
; X64-NEXT: retq
%in = insertelement <4 x float> undef, float %scalar, i32 0
@@ -790,12 +790,12 @@ define <4 x float> @_inreg2(float %scala
define <4 x double> @_inreg3(double %scalar) nounwind uwtable readnone ssp {
; X32-LABEL: _inreg3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%in = insertelement <4 x double> undef, double %scalar, i32 0
@@ -805,12 +805,12 @@ define <4 x double> @_inreg3(double %sca
define <8 x float> @_inreg8xfloat(<8 x float> %a) {
; X32-LABEL: _inreg8xfloat:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg8xfloat:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer
@@ -819,12 +819,12 @@ define <8 x float> @_inreg8xfloat(<8 x
define <4 x float> @_inreg4xfloat(<4 x float> %a) {
; X32-LABEL: _inreg4xfloat:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg4xfloat:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %xmm0
; X64-NEXT: retq
%b = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer
@@ -833,12 +833,12 @@ define <4 x float> @_inreg4xfloat(<4 x
define <16 x i16> @_inreg16xi16(<16 x i16> %a) {
; X32-LABEL: _inreg16xi16:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpbroadcastw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg16xi16:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastw %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer
@@ -847,12 +847,12 @@ define <16 x i16> @_inreg16xi16(<16 x
define <8 x i16> @_inreg8xi16(<8 x i16> %a) {
; X32-LABEL: _inreg8xi16:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpbroadcastw %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg8xi16:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastw %xmm0, %xmm0
; X64-NEXT: retq
%b = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> zeroinitializer
@@ -861,12 +861,12 @@ define <8 x i16> @_inreg8xi16(<8 x i16
define <4 x i64> @_inreg4xi64(<4 x i64> %a) {
; X32-LABEL: _inreg4xi64:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg4xi64:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -875,12 +875,12 @@ define <4 x i64> @_inreg4xi64(<4 x i64
define <2 x i64> @_inreg2xi64(<2 x i64> %a) {
; X32-LABEL: _inreg2xi64:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpbroadcastq %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg2xi64:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastq %xmm0, %xmm0
; X64-NEXT: retq
%b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -889,12 +889,12 @@ define <2 x i64> @_inreg2xi64(<2 x i64
define <4 x double> @_inreg4xdouble(<4 x double> %a) {
; X32-LABEL: _inreg4xdouble:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg4xdouble:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> zeroinitializer
@@ -903,12 +903,12 @@ define <4 x double> @_inreg4xdouble(<4
define <2 x double> @_inreg2xdouble(<2 x double> %a) {
; X32-LABEL: _inreg2xdouble:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT: retl
;
; X64-LABEL: _inreg2xdouble:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT: retq
%b = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer
@@ -917,12 +917,12 @@ define <2 x double> @_inreg2xdouble(<2
define <8 x i32> @_inreg8xi32(<8 x i32> %a) {
; X32-LABEL: _inreg8xi32:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg8xi32:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer
@@ -931,12 +931,12 @@ define <8 x i32> @_inreg8xi32(<8 x i32
define <4 x i32> @_inreg4xi32(<4 x i32> %a) {
; X32-LABEL: _inreg4xi32:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg4xi32:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %xmm0
; X64-NEXT: retq
%b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -945,12 +945,12 @@ define <4 x i32> @_inreg4xi32(<4 x i32
define <32 x i8> @_inreg32xi8(<32 x i8> %a) {
; X32-LABEL: _inreg32xi8:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpbroadcastb %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg32xi8:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastb %xmm0, %ymm0
; X64-NEXT: retq
%b = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer
@@ -959,12 +959,12 @@ define <32 x i8> @_inreg32xi8(<32 x i8
define <16 x i8> @_inreg16xi8(<16 x i8> %a) {
; X32-LABEL: _inreg16xi8:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vpbroadcastb %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: _inreg16xi8:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-NEXT: retq
%b = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -977,12 +977,12 @@ define <16 x i8> @_inreg16xi8(<16 x i8
define <8 x float> @splat_concat1(float %f) {
; X32-LABEL: splat_concat1:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat1:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%1 = insertelement <4 x float> undef, float %f, i32 0
@@ -995,12 +995,12 @@ define <8 x float> @splat_concat1(float
define <8 x float> @splat_concat2(float %f) {
; X32-LABEL: splat_concat2:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat2:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%1 = insertelement <4 x float> undef, float %f, i32 0
@@ -1017,12 +1017,12 @@ define <8 x float> @splat_concat2(float
define <4 x double> @splat_concat3(double %d) {
; X32-LABEL: splat_concat3:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat3:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%1 = insertelement <2 x double> undef, double %d, i32 0
@@ -1033,12 +1033,12 @@ define <4 x double> @splat_concat3(doubl
define <4 x double> @splat_concat4(double %d) {
; X32-LABEL: splat_concat4:
-; X32: ## BB#0:
+; X32: ## %bb.0:
; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: splat_concat4:
-; X64: ## BB#0:
+; X64: ## %bb.0:
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%1 = insertelement <2 x double> undef, double %d, i32 0
@@ -1059,7 +1059,7 @@ define <4 x double> @splat_concat4(doubl
define void @isel_crash_16b(i8* %cV_R.addr) {
; X32-LABEL: isel_crash_16b:
-; X32: ## BB#0: ## %eintry
+; X32: ## %bb.0: ## %eintry
; X32-NEXT: subl $60, %esp
; X32-NEXT: .cfi_def_cfa_offset 64
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1072,7 +1072,7 @@ define void @isel_crash_16b(i8* %cV_R.ad
; X32-NEXT: retl
;
; X64-LABEL: isel_crash_16b:
-; X64: ## BB#0: ## %eintry
+; X64: ## %bb.0: ## %eintry
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movb (%rdi), %al
@@ -1098,7 +1098,7 @@ eintry:
define void @isel_crash_32b(i8* %cV_R.addr) {
; X32-LABEL: isel_crash_32b:
-; X32: ## BB#0: ## %eintry
+; X32: ## %bb.0: ## %eintry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -1118,7 +1118,7 @@ define void @isel_crash_32b(i8* %cV_R.ad
; X32-NEXT: retl
;
; X64-LABEL: isel_crash_32b:
-; X64: ## BB#0: ## %eintry
+; X64: ## %bb.0: ## %eintry
; X64-NEXT: pushq %rbp
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: .cfi_offset %rbp, -16
@@ -1154,7 +1154,7 @@ eintry:
define void @isel_crash_8w(i16* %cV_R.addr) {
; X32-LABEL: isel_crash_8w:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: subl $60, %esp
; X32-NEXT: .cfi_def_cfa_offset 64
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1167,7 +1167,7 @@ define void @isel_crash_8w(i16* %cV_R.ad
; X32-NEXT: retl
;
; X64-LABEL: isel_crash_8w:
-; X64: ## BB#0: ## %entry
+; X64: ## %bb.0: ## %entry
; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: movzwl (%rdi), %eax
@@ -1193,7 +1193,7 @@ entry:
define void @isel_crash_16w(i16* %cV_R.addr) {
; X32-LABEL: isel_crash_16w:
-; X32: ## BB#0: ## %eintry
+; X32: ## %bb.0: ## %eintry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -1213,7 +1213,7 @@ define void @isel_crash_16w(i16* %cV_R.a
; X32-NEXT: retl
;
; X64-LABEL: isel_crash_16w:
-; X64: ## BB#0: ## %eintry
+; X64: ## %bb.0: ## %eintry
; X64-NEXT: pushq %rbp
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: .cfi_offset %rbp, -16
@@ -1249,7 +1249,7 @@ eintry:
define void @isel_crash_4d(i32* %cV_R.addr) {
; X32-LABEL: isel_crash_4d:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: subl $60, %esp
; X32-NEXT: .cfi_def_cfa_offset 64
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1262,7 +1262,7 @@ define void @isel_crash_4d(i32* %cV_R.ad
; X32-NEXT: retl
;
; X64-AVX2-LABEL: isel_crash_4d:
-; X64-AVX2: ## BB#0: ## %entry
+; X64-AVX2: ## %bb.0: ## %entry
; X64-AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX2-NEXT: movl (%rdi), %eax
@@ -1273,7 +1273,7 @@ define void @isel_crash_4d(i32* %cV_R.ad
; X64-AVX2-NEXT: retq
;
; X64-AVX512VL-LABEL: isel_crash_4d:
-; X64-AVX512VL: ## BB#0: ## %entry
+; X64-AVX512VL: ## %bb.0: ## %entry
; X64-AVX512VL-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX512VL-NEXT: movl (%rdi), %eax
@@ -1298,7 +1298,7 @@ entry:
define void @isel_crash_8d(i32* %cV_R.addr) {
; X32-LABEL: isel_crash_8d:
-; X32: ## BB#0: ## %eintry
+; X32: ## %bb.0: ## %eintry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -1318,7 +1318,7 @@ define void @isel_crash_8d(i32* %cV_R.ad
; X32-NEXT: retl
;
; X64-AVX2-LABEL: isel_crash_8d:
-; X64-AVX2: ## BB#0: ## %eintry
+; X64-AVX2: ## %bb.0: ## %eintry
; X64-AVX2-NEXT: pushq %rbp
; X64-AVX2-NEXT: .cfi_def_cfa_offset 16
; X64-AVX2-NEXT: .cfi_offset %rbp, -16
@@ -1339,7 +1339,7 @@ define void @isel_crash_8d(i32* %cV_R.ad
; X64-AVX2-NEXT: retq
;
; X64-AVX512VL-LABEL: isel_crash_8d:
-; X64-AVX512VL: ## BB#0: ## %eintry
+; X64-AVX512VL: ## %bb.0: ## %eintry
; X64-AVX512VL-NEXT: pushq %rbp
; X64-AVX512VL-NEXT: .cfi_def_cfa_offset 16
; X64-AVX512VL-NEXT: .cfi_offset %rbp, -16
@@ -1374,7 +1374,7 @@ eintry:
define void @isel_crash_2q(i64* %cV_R.addr) {
; X32-LABEL: isel_crash_2q:
-; X32: ## BB#0: ## %entry
+; X32: ## %bb.0: ## %entry
; X32-NEXT: subl $60, %esp
; X32-NEXT: .cfi_def_cfa_offset 64
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1392,7 +1392,7 @@ define void @isel_crash_2q(i64* %cV_R.ad
; X32-NEXT: retl
;
; X64-AVX2-LABEL: isel_crash_2q:
-; X64-AVX2: ## BB#0: ## %entry
+; X64-AVX2: ## %bb.0: ## %entry
; X64-AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX2-NEXT: movq (%rdi), %rax
@@ -1403,7 +1403,7 @@ define void @isel_crash_2q(i64* %cV_R.ad
; X64-AVX2-NEXT: retq
;
; X64-AVX512VL-LABEL: isel_crash_2q:
-; X64-AVX512VL: ## BB#0: ## %entry
+; X64-AVX512VL: ## %bb.0: ## %entry
; X64-AVX512VL-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX512VL-NEXT: movq (%rdi), %rax
@@ -1427,7 +1427,7 @@ entry:
define void @isel_crash_4q(i64* %cV_R.addr) {
; X32-LABEL: isel_crash_4q:
-; X32: ## BB#0: ## %eintry
+; X32: ## %bb.0: ## %eintry
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
@@ -1453,7 +1453,7 @@ define void @isel_crash_4q(i64* %cV_R.ad
; X32-NEXT: retl
;
; X64-AVX2-LABEL: isel_crash_4q:
-; X64-AVX2: ## BB#0: ## %eintry
+; X64-AVX2: ## %bb.0: ## %eintry
; X64-AVX2-NEXT: pushq %rbp
; X64-AVX2-NEXT: .cfi_def_cfa_offset 16
; X64-AVX2-NEXT: .cfi_offset %rbp, -16
@@ -1474,7 +1474,7 @@ define void @isel_crash_4q(i64* %cV_R.ad
; X64-AVX2-NEXT: retq
;
; X64-AVX512VL-LABEL: isel_crash_4q:
-; X64-AVX512VL: ## BB#0: ## %eintry
+; X64-AVX512VL: ## %bb.0: ## %eintry
; X64-AVX512VL-NEXT: pushq %rbp
; X64-AVX512VL-NEXT: .cfi_def_cfa_offset 16
; X64-AVX512VL-NEXT: .cfi_offset %rbp, -16
Modified: llvm/trunk/test/CodeGen/X86/avx2-vbroadcasti128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vbroadcasti128.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vbroadcasti128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vbroadcasti128.ll Mon Dec 4 09:18:51 2017
@@ -4,14 +4,14 @@
define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32-LABEL: test_broadcast_2f64_4f64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -23,14 +23,14 @@ define <4 x double> @test_broadcast_2f64
define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32-LABEL: test_broadcast_2i64_4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -42,14 +42,14 @@ define <4 x i64> @test_broadcast_2i64_4i
define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X32-LABEL: test_broadcast_4f32_8f32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -61,14 +61,14 @@ define <8 x float> @test_broadcast_4f32_
define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32-LABEL: test_broadcast_4i32_8i32:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -80,14 +80,14 @@ define <8 x i32> @test_broadcast_4i32_8i
define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -99,14 +99,14 @@ define <16 x i16> @test_broadcast_8i16_1
define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -118,7 +118,7 @@ define <32 x i8> @test_broadcast_16i8_32
define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x double>* %p1) {
; X32-LABEL: test_broadcast_2f64_4f64_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovapd (%ecx), %xmm1
@@ -128,7 +128,7 @@ define <4 x double> @test_broadcast_2f64
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovapd (%rdi), %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
@@ -143,7 +143,7 @@ define <4 x double> @test_broadcast_2f64
define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1) {
; X32-LABEL: test_broadcast_2i64_4i64_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovdqa (%ecx), %xmm1
@@ -153,7 +153,7 @@ define <4 x i64> @test_broadcast_2i64_4i
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa (%rdi), %xmm1
; X64-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
@@ -168,7 +168,7 @@ define <4 x i64> @test_broadcast_2i64_4i
define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>* %p1) {
; X32-LABEL: test_broadcast_4f32_8f32_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm1
@@ -178,7 +178,7 @@ define <8 x float> @test_broadcast_4f32_
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm1
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
@@ -193,7 +193,7 @@ define <8 x float> @test_broadcast_4f32_
define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1) {
; X32-LABEL: test_broadcast_4i32_8i32_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovdqa (%ecx), %xmm1
@@ -203,7 +203,7 @@ define <8 x i32> @test_broadcast_4i32_8i
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa (%rdi), %xmm1
; X64-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
@@ -218,7 +218,7 @@ define <8 x i32> @test_broadcast_4i32_8i
define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p1) nounwind {
; X32-LABEL: test_broadcast_8i16_16i16_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovdqa (%ecx), %xmm1
@@ -228,7 +228,7 @@ define <16 x i16> @test_broadcast_8i16_1
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa (%rdi), %xmm1
; X64-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
@@ -243,7 +243,7 @@ define <16 x i16> @test_broadcast_8i16_1
define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1) nounwind {
; X32-LABEL: test_broadcast_16i8_32i8_reuse:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovdqa (%ecx), %xmm1
@@ -253,7 +253,7 @@ define <32 x i8> @test_broadcast_16i8_32
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8_reuse:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovdqa (%rdi), %xmm1
; X64-NEXT: vinserti128 $1, %xmm1, %ymm1, %ymm0
; X64-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
@@ -268,7 +268,7 @@ define <32 x i8> @test_broadcast_16i8_32
define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) {
; X32-LABEL: PR29088:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovaps (%ecx), %xmm0
@@ -278,7 +278,7 @@ define <8 x i32> @PR29088(<4 x i32>* %p0
; X32-NEXT: retl
;
; X64-LABEL: PR29088:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm0
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vmovaps %ymm1, (%rsi)
Modified: llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll Mon Dec 4 09:18:51 2017
@@ -6,11 +6,11 @@
define <16 x i16> @test_sllw_1(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -19,12 +19,12 @@ entry:
define <16 x i16> @test_sllw_2(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpaddw %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpaddw %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -34,12 +34,12 @@ entry:
define <16 x i16> @test_sllw_3(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsllw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsllw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -49,11 +49,11 @@ entry:
define <8 x i32> @test_slld_1(<8 x i32> %InVec) {
; X32-LABEL: test_slld_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_slld_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -62,12 +62,12 @@ entry:
define <8 x i32> @test_slld_2(<8 x i32> %InVec) {
; X32-LABEL: test_slld_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_slld_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -77,14 +77,14 @@ entry:
define <8 x i32> @test_vpslld_var(i32 %shift) {
; X32-LABEL: test_vpslld_var:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [192,193,194,195,196,197,198,199]
; X32-NEXT: vpslld %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpslld_var:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vmovd %edi, %xmm0
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [192,193,194,195,196,197,198,199]
; X64-NEXT: vpslld %xmm0, %ymm1, %ymm0
@@ -96,12 +96,12 @@ define <8 x i32> @test_vpslld_var(i32 %s
define <8 x i32> @test_slld_3(<8 x i32> %InVec) {
; X32-LABEL: test_slld_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpslld $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_slld_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpslld $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -111,11 +111,11 @@ entry:
define <4 x i64> @test_sllq_1(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <4 x i64> %InVec, <i64 0, i64 0, i64 0, i64 0>
@@ -124,12 +124,12 @@ entry:
define <4 x i64> @test_sllq_2(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -139,12 +139,12 @@ entry:
define <4 x i64> @test_sllq_3(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsllq $63, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsllq $63, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -156,11 +156,11 @@ entry:
define <16 x i16> @test_sraw_1(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = ashr <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -169,12 +169,12 @@ entry:
define <16 x i16> @test_sraw_2(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsraw $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsraw $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -184,12 +184,12 @@ entry:
define <16 x i16> @test_sraw_3(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsraw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsraw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -199,11 +199,11 @@ entry:
define <8 x i32> @test_srad_1(<8 x i32> %InVec) {
; X32-LABEL: test_srad_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srad_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = ashr <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -212,12 +212,12 @@ entry:
define <8 x i32> @test_srad_2(<8 x i32> %InVec) {
; X32-LABEL: test_srad_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrad $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srad_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrad $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -227,12 +227,12 @@ entry:
define <8 x i32> @test_srad_3(<8 x i32> %InVec) {
; X32-LABEL: test_srad_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrad $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srad_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrad $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -244,11 +244,11 @@ entry:
define <16 x i16> @test_srlw_1(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -257,12 +257,12 @@ entry:
define <16 x i16> @test_srlw_2(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrlw $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrlw $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -272,12 +272,12 @@ entry:
define <16 x i16> @test_srlw_3(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrlw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrlw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -287,11 +287,11 @@ entry:
define <8 x i32> @test_srld_1(<8 x i32> %InVec) {
; X32-LABEL: test_srld_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srld_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -300,12 +300,12 @@ entry:
define <8 x i32> @test_srld_2(<8 x i32> %InVec) {
; X32-LABEL: test_srld_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrld $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srld_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrld $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -315,12 +315,12 @@ entry:
define <8 x i32> @test_srld_3(<8 x i32> %InVec) {
; X32-LABEL: test_srld_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrld $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srld_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrld $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -330,11 +330,11 @@ entry:
define <4 x i64> @test_srlq_1(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_1:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_1:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <4 x i64> %InVec, <i64 0, i64 0, i64 0, i64 0>
@@ -343,12 +343,12 @@ entry:
define <4 x i64> @test_srlq_2(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_2:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrlq $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_2:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrlq $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -358,12 +358,12 @@ entry:
define <4 x i64> @test_srlq_3(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_3:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpsrlq $63, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_3:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpsrlq $63, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -373,7 +373,7 @@ entry:
define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
; X32-LABEL: srl_trunc_and_v4i64:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; X32-NEXT: vpbroadcastd {{.*#+}} xmm2 = [8,8,8,8]
@@ -383,7 +383,7 @@ define <4 x i32> @srl_trunc_and_v4i64(<4
; X32-NEXT: retl
;
; X64-LABEL: srl_trunc_and_v4i64:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; X64-NEXT: vpbroadcastd {{.*#+}} xmm2 = [8,8,8,8]
@@ -403,7 +403,7 @@ define <4 x i32> @srl_trunc_and_v4i64(<4
define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: shl_8i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -414,7 +414,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r,
; X32-NEXT: retl
;
; X64-LABEL: shl_8i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
@@ -429,7 +429,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r,
define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: shl_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -443,7 +443,7 @@ define <16 x i16> @shl_16i16(<16 x i16>
; X32-NEXT: retl
;
; X64-LABEL: shl_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -461,7 +461,7 @@ define <16 x i16> @shl_16i16(<16 x i16>
define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: shl_32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsllw $4, %ymm0, %ymm2
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
@@ -476,7 +476,7 @@ define <32 x i8> @shl_32i8(<32 x i8> %r,
; X32-NEXT: retl
;
; X64-LABEL: shl_32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpsllw $4, %ymm0, %ymm2
; X64-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -495,7 +495,7 @@ define <32 x i8> @shl_32i8(<32 x i8> %r,
define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: ashr_8i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -505,7 +505,7 @@ define <8 x i16> @ashr_8i16(<8 x i16> %r
; X32-NEXT: retl
;
; X64-LABEL: ashr_8i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
@@ -519,7 +519,7 @@ define <8 x i16> @ashr_8i16(<8 x i16> %r
define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: ashr_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -533,7 +533,7 @@ define <16 x i16> @ashr_16i16(<16 x i16>
; X32-NEXT: retl
;
; X64-LABEL: ashr_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -551,7 +551,7 @@ define <16 x i16> @ashr_16i16(<16 x i16>
define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: ashr_32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X32-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -579,7 +579,7 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r
; X32-NEXT: retl
;
; X64-LABEL: ashr_32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X64-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -611,7 +611,7 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r
define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: lshr_8i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
@@ -622,7 +622,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r
; X32-NEXT: retl
;
; X64-LABEL: lshr_8i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
@@ -637,7 +637,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r
define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: lshr_16i16:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -651,7 +651,7 @@ define <16 x i16> @lshr_16i16(<16 x i16>
; X32-NEXT: retl
;
; X64-LABEL: lshr_16i16:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -669,7 +669,7 @@ define <16 x i16> @lshr_16i16(<16 x i16>
define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: lshr_32i8:
-; X32: # BB#0:
+; X32: # %bb.0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsrlw $4, %ymm0, %ymm2
; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
@@ -685,7 +685,7 @@ define <32 x i8> @lshr_32i8(<32 x i8> %r
; X32-NEXT: retl
;
; X64-LABEL: lshr_32i8:
-; X64: # BB#0:
+; X64: # %bb.0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpsrlw $4, %ymm0, %ymm2
; X64-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
Modified: llvm/trunk/test/CodeGen/X86/avx2-vperm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vperm.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vperm.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vperm.ll Mon Dec 4 09:18:51 2017
@@ -4,13 +4,13 @@
define <8 x i32> @perm_cl_int_8x32(<8 x i32> %A) nounwind readnone {
; X32-LABEL: perm_cl_int_8x32:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vmovaps {{.*#+}} ymm1 = [0,7,2,1,2,7,6,0]
; X32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_int_8x32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovaps {{.*#+}} ymm1 = [0,7,2,1,2,7,6,0]
; X64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -22,13 +22,13 @@ entry:
define <8 x float> @perm_cl_fp_8x32(<8 x float> %A) nounwind readnone {
; X32-LABEL: perm_cl_fp_8x32:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vmovaps {{.*#+}} ymm1 = <u,7,2,u,4,u,1,6>
; X32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_fp_8x32:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vmovaps {{.*#+}} ymm1 = <u,7,2,u,4,u,1,6>
; X64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -39,12 +39,12 @@ entry:
define <4 x i64> @perm_cl_int_4x64(<4 x i64> %A) nounwind readnone {
; X32-LABEL: perm_cl_int_4x64:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_int_4x64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X64-NEXT: retq
entry:
@@ -54,12 +54,12 @@ entry:
define <4 x double> @perm_cl_fp_4x64(<4 x double> %A) nounwind readnone {
; X32-LABEL: perm_cl_fp_4x64:
-; X32: # BB#0: # %entry
+; X32: # %bb.0: # %entry
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_fp_4x64:
-; X64: # BB#0: # %entry
+; X64: # %bb.0: # %entry
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X64-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/avx512-adc-sbb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-adc-sbb.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-adc-sbb.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-adc-sbb.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@
define i8 @PR32316(i8 %t1, i32 %t5, i8 %t8) {
; CHECK-LABEL: PR32316:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %dil, %dil
; CHECK-NEXT: sete %al
Modified: llvm/trunk/test/CodeGen/X86/avx512-any_extend_load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-any_extend_load.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-any_extend_load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-any_extend_load.ll Mon Dec 4 09:18:51 2017
@@ -5,14 +5,14 @@
define void @any_extend_load_v8i64(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; KNL-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; KNL-NEXT: vpmovqb %zmm0, (%rdi)
; KNL-NEXT: retq
;
; SKX-LABEL: any_extend_load_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: vpmovqb %zmm0, (%rdi)
@@ -29,7 +29,7 @@ define void @any_extend_load_v8i64(<8 x
define void @any_extend_load_v8i32(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
@@ -37,7 +37,7 @@ define void @any_extend_load_v8i32(<8 x
; KNL-NEXT: retq
;
; SKX-LABEL: any_extend_load_v8i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; SKX-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT: vpmovdb %ymm0, (%rdi)
@@ -54,7 +54,7 @@ define void @any_extend_load_v8i32(<8 x
define void @any_extend_load_v8i16(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
@@ -62,7 +62,7 @@ define void @any_extend_load_v8i16(<8 x
; KNL-NEXT: retq
;
; SKX-LABEL: any_extend_load_v8i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SKX-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; SKX-NEXT: vpmovwb %xmm0, (%rdi)
Modified: llvm/trunk/test/CodeGen/X86/avx512-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-arith.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-arith.ll Mon Dec 4 09:18:51 2017
@@ -7,7 +7,7 @@
define <8 x double> @addpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: addpd512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -17,7 +17,7 @@ entry:
define <8 x double> @addpd512fold(<8 x double> %y) {
; CHECK-LABEL: addpd512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -27,7 +27,7 @@ entry:
define <16 x float> @addps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: addps512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -37,7 +37,7 @@ entry:
define <16 x float> @addps512fold(<16 x float> %y) {
; CHECK-LABEL: addps512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -47,7 +47,7 @@ entry:
define <8 x double> @subpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: subpd512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsubpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -57,7 +57,7 @@ entry:
define <8 x double> @subpd512fold(<8 x double> %y, <8 x double>* %x) {
; CHECK-LABEL: subpd512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsubpd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -68,7 +68,7 @@ entry:
define <16 x float> @subps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: subps512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsubps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -78,7 +78,7 @@ entry:
define <16 x float> @subps512fold(<16 x float> %y, <16 x float>* %x) {
; CHECK-LABEL: subps512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsubps (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -89,7 +89,7 @@ entry:
define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512F-LABEL: imulq512:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512F-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512F-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -101,7 +101,7 @@ define <8 x i64> @imulq512(<8 x i64> %y,
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq512:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512VL-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512VL-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -113,7 +113,7 @@ define <8 x i64> @imulq512(<8 x i64> %y,
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq512:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -125,12 +125,12 @@ define <8 x i64> @imulq512(<8 x i64> %y,
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq512:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq512:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %zmm0, %zmm1, %zmm0
; SKX-NEXT: retq
%z = mul <8 x i64>%x, %y
@@ -139,7 +139,7 @@ define <8 x i64> @imulq512(<8 x i64> %y,
define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512F-LABEL: imulq256:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512F-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -151,7 +151,7 @@ define <4 x i64> @imulq256(<4 x i64> %y,
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq256:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512VL-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -163,7 +163,7 @@ define <4 x i64> @imulq256(<4 x i64> %y,
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq256:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512BW-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512BW-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -175,7 +175,7 @@ define <4 x i64> @imulq256(<4 x i64> %y,
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq256:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
@@ -183,7 +183,7 @@ define <4 x i64> @imulq256(<4 x i64> %y,
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq256:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %ymm0, %ymm1, %ymm0
; SKX-NEXT: retq
%z = mul <4 x i64>%x, %y
@@ -192,7 +192,7 @@ define <4 x i64> @imulq256(<4 x i64> %y,
define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512F-LABEL: imulq128:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -204,7 +204,7 @@ define <2 x i64> @imulq128(<2 x i64> %y,
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq128:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -216,7 +216,7 @@ define <2 x i64> @imulq128(<2 x i64> %y,
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq128:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -228,7 +228,7 @@ define <2 x i64> @imulq128(<2 x i64> %y,
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq128:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
@@ -237,7 +237,7 @@ define <2 x i64> @imulq128(<2 x i64> %y,
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq128:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmullq %xmm0, %xmm1, %xmm0
; SKX-NEXT: retq
%z = mul <2 x i64>%x, %y
@@ -246,7 +246,7 @@ define <2 x i64> @imulq128(<2 x i64> %y,
define <8 x double> @mulpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: mulpd512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -256,7 +256,7 @@ entry:
define <8 x double> @mulpd512fold(<8 x double> %y) {
; CHECK-LABEL: mulpd512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -266,7 +266,7 @@ entry:
define <16 x float> @mulps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: mulps512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -276,7 +276,7 @@ entry:
define <16 x float> @mulps512fold(<16 x float> %y) {
; CHECK-LABEL: mulps512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -286,7 +286,7 @@ entry:
define <8 x double> @divpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: divpd512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vdivpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -296,7 +296,7 @@ entry:
define <8 x double> @divpd512fold(<8 x double> %y) {
; CHECK-LABEL: divpd512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vdivpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -306,7 +306,7 @@ entry:
define <16 x float> @divps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: divps512:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vdivps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -316,7 +316,7 @@ entry:
define <16 x float> @divps512fold(<16 x float> %y) {
; CHECK-LABEL: divps512fold:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vdivps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -326,7 +326,7 @@ entry:
define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpaddq_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <8 x i64> %i, %j
@@ -335,7 +335,7 @@ define <8 x i64> @vpaddq_test(<8 x i64>
define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
; CHECK-LABEL: vpaddq_fold_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load <8 x i64>, <8 x i64>* %j, align 4
@@ -345,7 +345,7 @@ define <8 x i64> @vpaddq_fold_test(<8 x
define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
; CHECK-LABEL: vpaddq_broadcast_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <8 x i64> %i, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -354,7 +354,7 @@ define <8 x i64> @vpaddq_broadcast_test(
define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
; CHECK-LABEL: vpaddq_broadcast2_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load i64, i64* %j
@@ -372,7 +372,7 @@ define <8 x i64> @vpaddq_broadcast2_test
define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpaddd_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <16 x i32> %i, %j
@@ -381,7 +381,7 @@ define <16 x i32> @vpaddd_test(<16 x i32
define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
; CHECK-LABEL: vpaddd_fold_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load <16 x i32>, <16 x i32>* %j, align 4
@@ -391,7 +391,7 @@ define <16 x i32> @vpaddd_fold_test(<16
define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
; CHECK-LABEL: vpaddd_broadcast_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <16 x i32> %i, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -400,7 +400,7 @@ define <16 x i32> @vpaddd_broadcast_test
define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1}
@@ -413,7 +413,7 @@ define <16 x i32> @vpaddd_mask_test(<16
define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z}
@@ -426,7 +426,7 @@ define <16 x i32> @vpaddd_maskz_test(<16
define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_fold_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1}
@@ -440,7 +440,7 @@ define <16 x i32> @vpaddd_mask_fold_test
define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_broadcast_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1}
@@ -453,7 +453,7 @@ define <16 x i32> @vpaddd_mask_broadcast
define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_fold_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z}
@@ -467,7 +467,7 @@ define <16 x i32> @vpaddd_maskz_fold_tes
define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_broadcast_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z}
@@ -480,7 +480,7 @@ define <16 x i32> @vpaddd_maskz_broadcas
define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpsubq_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = sub <8 x i64> %i, %j
@@ -489,7 +489,7 @@ define <8 x i64> @vpsubq_test(<8 x i64>
define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpsubd_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = sub <16 x i32> %i, %j
@@ -498,7 +498,7 @@ define <16 x i32> @vpsubd_test(<16 x i32
define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
; CHECK-LABEL: vpmulld_test:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = mul <16 x i32> %i, %j
@@ -508,7 +508,7 @@ define <16 x i32> @vpmulld_test(<16 x i3
declare float @sqrtf(float) readnone
define float @sqrtA(float %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: sqrtA:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -519,7 +519,7 @@ entry:
declare double @sqrt(double) readnone
define double @sqrtB(double %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: sqrtB:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -530,7 +530,7 @@ entry:
declare float @llvm.sqrt.f32(float)
define float @sqrtC(float %a) nounwind {
; CHECK-LABEL: sqrtC:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%b = call float @llvm.sqrt.f32(float %a)
@@ -540,7 +540,7 @@ define float @sqrtC(float %a) nounwind {
declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
define <16 x float> @sqrtD(<16 x float> %a) nounwind {
; CHECK-LABEL: sqrtD:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtps %zmm0, %zmm0
; CHECK-NEXT: retq
%b = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a)
@@ -550,7 +550,7 @@ define <16 x float> @sqrtD(<16 x float>
declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
define <8 x double> @sqrtE(<8 x double> %a) nounwind {
; CHECK-LABEL: sqrtE:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtpd %zmm0, %zmm0
; CHECK-NEXT: retq
%b = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a)
@@ -559,7 +559,7 @@ define <8 x double> @sqrtE(<8 x double>
define <16 x float> @fadd_broadcast(<16 x float> %a) nounwind {
; CHECK-LABEL: fadd_broadcast:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%b = fadd <16 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
@@ -568,7 +568,7 @@ define <16 x float> @fadd_broadcast(<16
define <8 x i64> @addq_broadcast(<8 x i64> %a) nounwind {
; CHECK-LABEL: addq_broadcast:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%b = add <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -577,27 +577,27 @@ define <8 x i64> @addq_broadcast(<8 x i6
define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
; AVX512F-LABEL: orq_broadcast:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: orq_broadcast:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: orq_broadcast:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: orq_broadcast:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: orq_broadcast:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
%b = or <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -606,27 +606,27 @@ define <8 x i64> @orq_broadcast(<8 x i64
define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
; AVX512F-LABEL: andd512fold:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpandq (%rdi), %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: andd512fold:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpandq (%rdi), %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: andd512fold:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpandq (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: andd512fold:
-; AVX512DQ: # BB#0: # %entry
+; AVX512DQ: # %bb.0: # %entry
; AVX512DQ-NEXT: vandps (%rdi), %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: andd512fold:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vandps (%rdi), %zmm0, %zmm0
; SKX-NEXT: retq
entry:
@@ -637,27 +637,27 @@ entry:
define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
; AVX512F-LABEL: andqbrst:
-; AVX512F: # BB#0: # %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: andqbrst:
-; AVX512VL: # BB#0: # %entry
+; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: andqbrst:
-; AVX512BW: # BB#0: # %entry
+; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: andqbrst:
-; AVX512DQ: # BB#0: # %entry
+; AVX512DQ: # %bb.0: # %entry
; AVX512DQ-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: andqbrst:
-; SKX: # BB#0: # %entry
+; SKX: # %bb.0: # %entry
; SKX-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
entry:
@@ -670,7 +670,7 @@ entry:
define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vaddps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm0 {%k1}
@@ -685,7 +685,7 @@ define <16 x float> @test_mask_vaddps(<1
define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vmulps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vmulps %zmm2, %zmm1, %zmm0 {%k1}
@@ -700,7 +700,7 @@ define <16 x float> @test_mask_vmulps(<1
define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vminps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vminps %zmm2, %zmm1, %zmm0 {%k1}
@@ -716,7 +716,7 @@ define <16 x float> @test_mask_vminps(<1
define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vminpd:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -724,14 +724,14 @@ define <8 x double> @test_mask_vminpd(<8
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_mask_vminpd:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512VL-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; AVX512VL-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_mask_vminpd:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512BW-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -739,7 +739,7 @@ define <8 x double> @test_mask_vminpd(<8
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vminpd:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512DQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -747,7 +747,7 @@ define <8 x double> @test_mask_vminpd(<8
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_mask_vminpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; SKX-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; SKX-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -763,7 +763,7 @@ define <8 x double> @test_mask_vminpd(<8
define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vmaxps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vmaxps %zmm2, %zmm1, %zmm0 {%k1}
@@ -779,7 +779,7 @@ define <16 x float> @test_mask_vmaxps(<1
define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vmaxpd:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -787,14 +787,14 @@ define <8 x double> @test_mask_vmaxpd(<8
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_mask_vmaxpd:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512VL-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; AVX512VL-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_mask_vmaxpd:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512BW-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -802,7 +802,7 @@ define <8 x double> @test_mask_vmaxpd(<8
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vmaxpd:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512DQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
@@ -810,7 +810,7 @@ define <8 x double> @test_mask_vmaxpd(<8
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_mask_vmaxpd:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; SKX-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; SKX-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -826,7 +826,7 @@ define <8 x double> @test_mask_vmaxpd(<8
define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vsubps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vsubps %zmm2, %zmm1, %zmm0 {%k1}
@@ -841,7 +841,7 @@ define <16 x float> @test_mask_vsubps(<1
define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vdivps:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vdivps %zmm2, %zmm1, %zmm0 {%k1}
@@ -856,7 +856,7 @@ define <16 x float> @test_mask_vdivps(<1
define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vpcmpneqq %zmm4, %zmm3, %k1
; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -871,7 +871,7 @@ define <8 x double> @test_mask_vaddpd(<8
define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j,
; CHECK-LABEL: test_maskz_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0 {%k1} {z}
@@ -885,7 +885,7 @@ define <8 x double> @test_maskz_vaddpd(<
define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_fold_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpcmpneqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vaddpd (%rdi), %zmm1, %zmm0 {%k1}
@@ -901,7 +901,7 @@ define <8 x double> @test_mask_fold_vadd
define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
; CHECK-LABEL: test_maskz_fold_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vaddpd (%rdi), %zmm0, %zmm0 {%k1} {z}
@@ -916,7 +916,7 @@ define <8 x double> @test_maskz_fold_vad
define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind {
; CHECK-LABEL: test_broadcast_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load double, double* %j
@@ -929,7 +929,7 @@ define <8 x double> @test_broadcast_vadd
define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_broadcast_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpcmpneqq %zmm0, %zmm2, %k1
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm1, %zmm1 {%k1}
@@ -948,7 +948,7 @@ define <8 x double> @test_mask_broadcast
define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
; CHECK-LABEL: test_maskz_broadcast_vaddpd:
-; CHECK: # BB#0:
+; CHECK: # %bb.0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
@@ -966,27 +966,27 @@ define <8 x double> @test_maskz_broadcas
define <16 x float> @test_fxor(<16 x float> %a) {
; AVX512F-LABEL: test_fxor:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_fxor:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_fxor:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_fxor:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_fxor:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; SKX-NEXT: retq
@@ -996,30 +996,30 @@ define <16 x float> @test_fxor(<16 x fl
define <8 x float> @test_fxor_8f32(<8 x float> %a) {
; AVX512F-LABEL: test_fxor_8f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0,-0,-0,-0,-0,-0,-0,-0]
; AVX512F-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_fxor_8f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxord {{.*}}(%rip){1to8}, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_fxor_8f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0,-0,-0,-0,-0,-0,-0,-0]
; AVX512BW-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_fxor_8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vbroadcastss {{.*#+}} ymm1 = [-0,-0,-0,-0,-0,-0,-0,-0]
; AVX512DQ-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_fxor_8f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vxorps {{.*}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT: retq
%res = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
@@ -1028,27 +1028,27 @@ define <8 x float> @test_fxor_8f32(<8 x
define <8 x double> @fabs_v8f64(<8 x double> %p)
; AVX512F-LABEL: fabs_v8f64:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fabs_v8f64:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: fabs_v8f64:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: fabs_v8f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: fabs_v8f64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
{
@@ -1059,27 +1059,27 @@ declare <8 x double> @llvm.fabs.v8f64(<8
define <16 x float> @fabs_v16f32(<16 x float> %p)
; AVX512F-LABEL: fabs_v16f32:
-; AVX512F: # BB#0:
+; AVX512F: # %bb.0:
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fabs_v16f32:
-; AVX512VL: # BB#0:
+; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: fabs_v16f32:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: fabs_v16f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: fabs_v16f32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; SKX-NEXT: retq
{
Modified: llvm/trunk/test/CodeGen/X86/avx512-bugfix-23634.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-bugfix-23634.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-bugfix-23634.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-bugfix-23634.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-linux-gn
define void @f_fu(float* %ret, float* %aa, float %b) {
; CHECK-LABEL: f_fu:
-; CHECK: ## BB#0: ## %allocas
+; CHECK: ## %bb.0: ## %allocas
; CHECK-NEXT: vcvttss2si %xmm0, %eax
; CHECK-NEXT: vpbroadcastd %eax, %zmm0
; CHECK-NEXT: vcvttps2dq (%rsi), %zmm1
Modified: llvm/trunk/test/CodeGen/X86/avx512-bugfix-25270.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-bugfix-25270.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-bugfix-25270.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-bugfix-25270.ll Mon Dec 4 09:18:51 2017
@@ -5,7 +5,7 @@ declare void @Print__512(<16 x i32>) #0
define void @bar__512(<16 x i32>* %var) #0 {
; CHECK-LABEL: bar__512:
-; CHECK: ## BB#0: ## %allocas
+; CHECK: ## %bb.0: ## %allocas
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: subq $112, %rsp
; CHECK-NEXT: movq %rdi, %rbx
Modified: llvm/trunk/test/CodeGen/X86/avx512-bugfix-26264.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-bugfix-26264.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-bugfix-26264.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-bugfix-26264.ll Mon Dec 4 09:18:51 2017
@@ -3,7 +3,7 @@
define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0) {
; AVX512BW-LABEL: test_load_32f64:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k1
; AVX512BW-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
@@ -21,7 +21,7 @@ define <32 x double> @test_load_32f64(<3
define <32 x i64> @test_load_32i64(<32 x i64>* %ptrs, <32 x i1> %mask, <32 x i64> %src0) {
; AVX512BW-LABEL: test_load_32i64:
-; AVX512BW: ## BB#0:
+; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k1
; AVX512BW-NEXT: vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
Modified: llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll Mon Dec 4 09:18:51 2017
@@ -3,7 +3,7 @@
define <16 x i32> @test2(<16 x i32> %x) {
; CHECK-LABEL: test2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -13,7 +13,7 @@ define <16 x i32> @test2(<16 x i32> %x)
define <16 x float> @test3(<4 x float> %a) {
; CHECK-LABEL: test3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,2,3,4,18,16,7,8,9,10,11,12,13,14,15]
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
Modified: llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll Mon Dec 4 09:18:51 2017
@@ -5,12 +5,12 @@
define <16 x i1> @test1() {
; ALL_X64-LABEL: test1:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test1:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
; KNL_X32-NEXT: retl
ret <16 x i1> zeroinitializer
@@ -18,7 +18,7 @@ define <16 x i1> @test1() {
define <16 x i1> @test2(<16 x i1>%a, <16 x i1>%b) {
; KNL-LABEL: test2:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
@@ -30,7 +30,7 @@ define <16 x i1> @test2(<16 x i1>%a, <16
; KNL-NEXT: retq
;
; SKX-LABEL: test2:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k0
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
@@ -40,7 +40,7 @@ define <16 x i1> @test2(<16 x i1>%a, <16
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test2:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL_X32-NEXT: vpslld $31, %zmm1, %zmm1
; KNL_X32-NEXT: vpmovsxbd %xmm0, %zmm0
@@ -56,7 +56,7 @@ define <16 x i1> @test2(<16 x i1>%a, <16
define <8 x i1> @test3(<8 x i1>%a, <8 x i1>%b) {
; KNL-LABEL: test3:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
@@ -68,7 +68,7 @@ define <8 x i1> @test3(<8 x i1>%a, <8 x
; KNL-NEXT: retq
;
; SKX-LABEL: test3:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k0
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
@@ -78,7 +78,7 @@ define <8 x i1> @test3(<8 x i1>%a, <8 x
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test3:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL_X32-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL_X32-NEXT: vpmovsxwq %xmm0, %zmm0
@@ -94,12 +94,12 @@ define <8 x i1> @test3(<8 x i1>%a, <8 x
define <4 x i1> @test4(<4 x i1>%a, <4 x i1>%b) {
; KNL-LABEL: test4:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vandps %xmm1, %xmm0, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test4:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
@@ -108,7 +108,7 @@ define <4 x i1> @test4(<4 x i1>%a, <4 x
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test4:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: vandps %xmm1, %xmm0, %xmm0
; KNL_X32-NEXT: retl
%c = and <4 x i1>%a, %b
@@ -119,7 +119,7 @@ declare <8 x i1> @func8xi1(<8 x i1> %a)
define <8 x i32> @test5(<8 x i32>%a, <8 x i32>%b) {
; KNL-LABEL: test5:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rax
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
@@ -133,7 +133,7 @@ define <8 x i32> @test5(<8 x i32>%a, <8
; KNL-NEXT: retq
;
; SKX-LABEL: test5:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rax
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
@@ -147,7 +147,7 @@ define <8 x i32> @test5(<8 x i32>%a, <8
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test5:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: subl $12, %esp
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
@@ -169,7 +169,7 @@ declare <16 x i1> @func16xi1(<16 x i1> %
define <16 x i32> @test6(<16 x i32>%a, <16 x i32>%b) {
; KNL-LABEL: test6:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rax
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
@@ -183,7 +183,7 @@ define <16 x i32> @test6(<16 x i32>%a, <
; KNL-NEXT: retq
;
; SKX-LABEL: test6:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rax
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
@@ -197,7 +197,7 @@ define <16 x i32> @test6(<16 x i32>%a, <
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test6:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: subl $12, %esp
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
@@ -219,7 +219,7 @@ declare <4 x i1> @func4xi1(<4 x i1> %a)
define <4 x i32> @test7(<4 x i32>%a, <4 x i32>%b) {
; KNL-LABEL: test7:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rax
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -230,7 +230,7 @@ define <4 x i32> @test7(<4 x i32>%a, <4
; KNL-NEXT: retq
;
; SKX-LABEL: test7:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rax
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
@@ -242,7 +242,7 @@ define <4 x i32> @test7(<4 x i32>%a, <4
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test7:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: subl $12, %esp
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -259,7 +259,7 @@ define <4 x i32> @test7(<4 x i32>%a, <4
define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; KNL-LABEL: test7a:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: pushq %rax
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
@@ -277,7 +277,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8
; KNL-NEXT: retq
;
; SKX-LABEL: test7a:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: pushq %rax
; SKX-NEXT: .cfi_def_cfa_offset 16
; SKX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
@@ -294,7 +294,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test7a:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: subl $12, %esp
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
@@ -318,19 +318,19 @@ define <8 x i1> @test7a(<8 x i32>%a, <8
define <16 x i8> @test8(<16 x i8> %a1, <16 x i8> %a2, i1 %cond) {
; ALL_X64-LABEL: test8:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: testb $1, %dil
; ALL_X64-NEXT: jne LBB8_2
-; ALL_X64-NEXT: ## BB#1:
+; ALL_X64-NEXT: ## %bb.1:
; ALL_X64-NEXT: vmovaps %xmm1, %xmm0
; ALL_X64-NEXT: LBB8_2:
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test8:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: testb $1, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: jne LBB8_2
-; KNL_X32-NEXT: ## BB#1:
+; KNL_X32-NEXT: ## %bb.1:
; KNL_X32-NEXT: vmovaps %xmm1, %xmm0
; KNL_X32-NEXT: LBB8_2:
; KNL_X32-NEXT: retl
@@ -340,13 +340,13 @@ define <16 x i8> @test8(<16 x i8> %a1, <
define i1 @test9(double %a, double %b) {
; ALL_X64-LABEL: test9:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: vucomisd %xmm0, %xmm1
; ALL_X64-NEXT: setb %al
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test9:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; KNL_X32-NEXT: vucomisd {{[0-9]+}}(%esp), %xmm0
; KNL_X32-NEXT: setb %al
@@ -357,14 +357,14 @@ define i1 @test9(double %a, double %b) {
define i32 @test10(i32 %a, i32 %b, i1 %cond) {
; ALL_X64-LABEL: test10:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: testb $1, %dl
; ALL_X64-NEXT: cmovel %esi, %edi
; ALL_X64-NEXT: movl %edi, %eax
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test10:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: testb $1, {{[0-9]+}}(%esp)
; KNL_X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; KNL_X32-NEXT: leal {{[0-9]+}}(%esp), %ecx
@@ -377,13 +377,13 @@ define i32 @test10(i32 %a, i32 %b, i1 %c
define i1 @test11(i32 %a, i32 %b) {
; ALL_X64-LABEL: test11:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: cmpl %esi, %edi
; ALL_X64-NEXT: setg %al
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test11:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_X32-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; KNL_X32-NEXT: setg %al
@@ -394,7 +394,7 @@ define i1 @test11(i32 %a, i32 %b) {
define i32 @test12(i32 %a1, i32 %a2, i32 %b1) {
; ALL_X64-LABEL: test12:
-; ALL_X64: ## BB#0:
+; ALL_X64: ## %bb.0:
; ALL_X64-NEXT: pushq %rbp
; ALL_X64-NEXT: .cfi_def_cfa_offset 16
; ALL_X64-NEXT: pushq %r14
@@ -422,7 +422,7 @@ define i32 @test12(i32 %a1, i32 %a2, i32
; ALL_X64-NEXT: retq
;
; KNL_X32-LABEL: test12:
-; KNL_X32: ## BB#0:
+; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: pushl %ebx
; KNL_X32-NEXT: .cfi_def_cfa_offset 8
; KNL_X32-NEXT: pushl %edi
Modified: llvm/trunk/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cmp-kor-sequence.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cmp-kor-sequence.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cmp-kor-sequence.ll Mon Dec 4 09:18:51 2017
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gn
; Function Attrs: nounwind readnone uwtable
define zeroext i16 @cmp_kor_seq_16(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x float> %d, <16 x float> %x) local_unnamed_addr #0 {
; CHECK-LABEL: cmp_kor_seq_16:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vcmpgeps %zmm4, %zmm0, %k0
; CHECK-NEXT: vcmpgeps %zmm4, %zmm1, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
Modified: llvm/trunk/test/CodeGen/X86/avx512-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cmp.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cmp.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define double @test1(double %a, double %b) nounwind {
; ALL-LABEL: test1:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vucomisd %xmm1, %xmm0
; ALL-NEXT: jne LBB0_1
; ALL-NEXT: jnp LBB0_2
@@ -28,10 +28,10 @@ l2:
define float @test2(float %a, float %b) nounwind {
; ALL-LABEL: test2:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vucomiss %xmm0, %xmm1
; ALL-NEXT: jbe LBB1_2
-; ALL-NEXT: ## BB#1: ## %l1
+; ALL-NEXT: ## %bb.1: ## %l1
; ALL-NEXT: vsubss %xmm1, %xmm0, %xmm0
; ALL-NEXT: retq
; ALL-NEXT: LBB1_2: ## %l2
@@ -51,14 +51,14 @@ l2:
define i32 @test3(float %a, float %b) {
; KNL-LABEL: test3:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vcmpeqss %xmm1, %xmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: movzbl %al, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test3:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vcmpeqss %xmm1, %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: movzbl %al, %eax
@@ -71,12 +71,12 @@ define i32 @test3(float %a, float %b) {
define float @test5(float %p) #0 {
; ALL-LABEL: test5:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vucomiss %xmm1, %xmm0
; ALL-NEXT: jne LBB3_1
; ALL-NEXT: jp LBB3_1
-; ALL-NEXT: ## BB#2: ## %return
+; ALL-NEXT: ## %bb.2: ## %return
; ALL-NEXT: retq
; ALL-NEXT: LBB3_1: ## %if.end
; ALL-NEXT: seta %al
@@ -100,7 +100,7 @@ return:
define i32 @test6(i32 %a, i32 %b) {
; ALL-LABEL: test6:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: sete %al
@@ -112,7 +112,7 @@ define i32 @test6(i32 %a, i32 %b) {
define i32 @test7(double %x, double %y) #2 {
; ALL-LABEL: test7:
-; ALL: ## BB#0: ## %entry
+; ALL: ## %bb.0: ## %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: vucomisd %xmm1, %xmm0
; ALL-NEXT: setne %al
@@ -125,7 +125,7 @@ entry:
define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
; ALL-LABEL: test8:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: notl %edi
; ALL-NEXT: xorl $-2147483648, %esi ## imm = 0x80000000
; ALL-NEXT: testl %edx, %edx
@@ -145,10 +145,10 @@ define i32 @test8(i32 %a1, i32 %a2, i32
define i32 @test9(i64 %a) {
; ALL-LABEL: test9:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: testb $1, %dil
; ALL-NEXT: jne LBB7_2
-; ALL-NEXT: ## BB#1: ## %A
+; ALL-NEXT: ## %bb.1: ## %A
; ALL-NEXT: movl $6, %eax
; ALL-NEXT: retq
; ALL-NEXT: LBB7_2: ## %B
@@ -165,7 +165,7 @@ B:
define i32 @test10(i64 %b, i64 %c, i1 %d) {
; ALL-LABEL: test10:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: movl %edx, %eax
; ALL-NEXT: andb $1, %al
; ALL-NEXT: cmpq %rsi, %rdi
@@ -174,7 +174,7 @@ define i32 @test10(i64 %b, i64 %c, i1 %d
; ALL-NEXT: andb $1, %cl
; ALL-NEXT: cmpb %cl, %al
; ALL-NEXT: je LBB8_1
-; ALL-NEXT: ## BB#2: ## %if.end.i
+; ALL-NEXT: ## %bb.2: ## %if.end.i
; ALL-NEXT: movl $6, %eax
; ALL-NEXT: retq
; ALL-NEXT: LBB8_1: ## %if.then.i
Modified: llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cvt.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cvt.ll Mon Dec 4 09:18:51 2017
@@ -10,7 +10,7 @@
define <16 x float> @sitof32(<16 x i32> %a) nounwind {
; ALL-LABEL: sitof32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
%b = sitofp <16 x i32> %a to <16 x float>
@@ -19,7 +19,7 @@ define <16 x float> @sitof32(<16 x i32>
define <8 x double> @sltof864(<8 x i64> %a) {
; NODQ-LABEL: sltof864:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -49,7 +49,7 @@ define <8 x double> @sltof864(<8 x i64>
; NODQ-NEXT: retq
;
; DQ-LABEL: sltof864:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; DQ-NEXT: retq
%b = sitofp <8 x i64> %a to <8 x double>
@@ -58,7 +58,7 @@ define <8 x double> @sltof864(<8 x i64>
define <4 x double> @slto4f64(<4 x i64> %a) {
; NODQ-LABEL: slto4f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -74,12 +74,12 @@ define <4 x double> @slto4f64(<4 x i64>
; NODQ-NEXT: retq
;
; VLDQ-LABEL: slto4f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtqq2pd %ymm0, %ymm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: slto4f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -90,7 +90,7 @@ define <4 x double> @slto4f64(<4 x i64>
define <2 x double> @slto2f64(<2 x i64> %a) {
; NODQ-LABEL: slto2f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpextrq $1, %xmm0, %rax
; NODQ-NEXT: vcvtsi2sdq %rax, %xmm1, %xmm1
; NODQ-NEXT: vmovq %xmm0, %rax
@@ -99,12 +99,12 @@ define <2 x double> @slto2f64(<2 x i64>
; NODQ-NEXT: retq
;
; VLDQ-LABEL: slto2f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtqq2pd %xmm0, %xmm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: slto2f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -116,7 +116,7 @@ define <2 x double> @slto2f64(<2 x i64>
define <2 x float> @sltof2f32(<2 x i64> %a) {
; NODQ-LABEL: sltof2f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpextrq $1, %xmm0, %rax
; NODQ-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; NODQ-NEXT: vmovq %xmm0, %rax
@@ -127,12 +127,12 @@ define <2 x float> @sltof2f32(<2 x i64>
; NODQ-NEXT: retq
;
; VLDQ-LABEL: sltof2f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtqq2ps %xmm0, %xmm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: sltof2f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -144,7 +144,7 @@ define <2 x float> @sltof2f32(<2 x i64>
define <4 x float> @slto4f32_mem(<4 x i64>* %a) {
; NODQ-LABEL: slto4f32_mem:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vmovdqu (%rdi), %ymm0
; NODQ-NEXT: vpextrq $1, %xmm0, %rax
; NODQ-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
@@ -162,12 +162,12 @@ define <4 x float> @slto4f32_mem(<4 x i6
; NODQ-NEXT: retq
;
; VLDQ-LABEL: slto4f32_mem:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtqq2psy (%rdi), %xmm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: slto4f32_mem:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovups (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -180,7 +180,7 @@ define <4 x float> @slto4f32_mem(<4 x i6
define <4 x i64> @f64to4sl(<4 x double> %a) {
; NODQ-LABEL: f64to4sl:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextractf128 $1, %ymm0, %xmm1
; NODQ-NEXT: vcvttsd2si %xmm1, %rax
; NODQ-NEXT: vmovq %rax, %xmm2
@@ -198,12 +198,12 @@ define <4 x i64> @f64to4sl(<4 x double>
; NODQ-NEXT: retq
;
; VLDQ-LABEL: f64to4sl:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvttpd2qq %ymm0, %ymm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: f64to4sl:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -214,7 +214,7 @@ define <4 x i64> @f64to4sl(<4 x double>
define <4 x i64> @f32to4sl(<4 x float> %a) {
; NODQ-LABEL: f32to4sl:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; NODQ-NEXT: vcvttss2si %xmm1, %rax
; NODQ-NEXT: vmovq %rax, %xmm1
@@ -232,12 +232,12 @@ define <4 x i64> @f32to4sl(<4 x float> %
; NODQ-NEXT: retq
;
; VLDQ-LABEL: f32to4sl:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: f32to4sl:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
@@ -248,7 +248,7 @@ define <4 x i64> @f32to4sl(<4 x float> %
define <4 x float> @slto4f32(<4 x i64> %a) {
; NODQ-LABEL: slto4f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpextrq $1, %xmm0, %rax
; NODQ-NEXT: vcvtsi2ssq %rax, %xmm1, %xmm1
; NODQ-NEXT: vmovq %xmm0, %rax
@@ -265,13 +265,13 @@ define <4 x float> @slto4f32(<4 x i64> %
; NODQ-NEXT: retq
;
; VLDQ-LABEL: slto4f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; VLDQ-NEXT: vzeroupper
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: slto4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -283,7 +283,7 @@ define <4 x float> @slto4f32(<4 x i64> %
define <4 x float> @ulto4f32(<4 x i64> %a) {
; NODQ-LABEL: ulto4f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpextrq $1, %xmm0, %rax
; NODQ-NEXT: vcvtusi2ssq %rax, %xmm1, %xmm1
; NODQ-NEXT: vmovq %xmm0, %rax
@@ -300,13 +300,13 @@ define <4 x float> @ulto4f32(<4 x i64> %
; NODQ-NEXT: retq
;
; VLDQ-LABEL: ulto4f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
; VLDQ-NEXT: vzeroupper
; VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: ulto4f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -318,7 +318,7 @@ define <4 x float> @ulto4f32(<4 x i64> %
define <8 x double> @ulto8f64(<8 x i64> %a) {
; NODQ-LABEL: ulto8f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtusi2sdq %rax, %xmm2, %xmm2
@@ -348,7 +348,7 @@ define <8 x double> @ulto8f64(<8 x i64>
; NODQ-NEXT: retq
;
; DQ-LABEL: ulto8f64:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; DQ-NEXT: retq
%b = uitofp <8 x i64> %a to <8 x double>
@@ -357,7 +357,7 @@ define <8 x double> @ulto8f64(<8 x i64>
define <16 x double> @ulto16f64(<16 x i64> %a) {
; NODQ-LABEL: ulto16f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $3, %zmm0, %xmm2
; NODQ-NEXT: vpextrq $1, %xmm2, %rax
; NODQ-NEXT: vcvtusi2sdq %rax, %xmm3, %xmm3
@@ -413,7 +413,7 @@ define <16 x double> @ulto16f64(<16 x i6
; NODQ-NEXT: retq
;
; DQ-LABEL: ulto16f64:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; DQ-NEXT: vcvtuqq2pd %zmm1, %zmm1
; DQ-NEXT: retq
@@ -423,7 +423,7 @@ define <16 x double> @ulto16f64(<16 x i6
define <16 x i32> @f64to16si(<16 x float> %a) nounwind {
; ALL-LABEL: f64to16si:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2dq %zmm0, %zmm0
; ALL-NEXT: retq
%b = fptosi <16 x float> %a to <16 x i32>
@@ -432,7 +432,7 @@ define <16 x i32> @f64to16si(<16 x float
define <16 x i8> @f32to16sc(<16 x float> %f) {
; ALL-LABEL: f32to16sc:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2dq %zmm0, %zmm0
; ALL-NEXT: vpmovdb %zmm0, %xmm0
; ALL-NEXT: vzeroupper
@@ -443,7 +443,7 @@ define <16 x i8> @f32to16sc(<16 x float>
define <16 x i16> @f32to16ss(<16 x float> %f) {
; ALL-LABEL: f32to16ss:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2dq %zmm0, %zmm0
; ALL-NEXT: vpmovdw %zmm0, %ymm0
; ALL-NEXT: retq
@@ -453,7 +453,7 @@ define <16 x i16> @f32to16ss(<16 x float
define <16 x i32> @f32to16ui(<16 x float> %a) nounwind {
; ALL-LABEL: f32to16ui:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2udq %zmm0, %zmm0
; ALL-NEXT: retq
%b = fptoui <16 x float> %a to <16 x i32>
@@ -462,7 +462,7 @@ define <16 x i32> @f32to16ui(<16 x float
define <16 x i8> @f32to16uc(<16 x float> %f) {
; ALL-LABEL: f32to16uc:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2dq %zmm0, %zmm0
; ALL-NEXT: vpmovdb %zmm0, %xmm0
; ALL-NEXT: vzeroupper
@@ -473,7 +473,7 @@ define <16 x i8> @f32to16uc(<16 x float>
define <16 x i16> @f32to16us(<16 x float> %f) {
; ALL-LABEL: f32to16us:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttps2dq %zmm0, %zmm0
; ALL-NEXT: vpmovdw %zmm0, %ymm0
; ALL-NEXT: retq
@@ -483,14 +483,14 @@ define <16 x i16> @f32to16us(<16 x float
define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
; NOVL-LABEL: f32to8ui:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NOVL-NEXT: retq
;
; VL-LABEL: f32to8ui:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvttps2udq %ymm0, %ymm0
; VL-NEXT: retq
%b = fptoui <8 x float> %a to <8 x i32>
@@ -499,7 +499,7 @@ define <8 x i32> @f32to8ui(<8 x float> %
define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
; NOVL-LABEL: f32to4ui:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -507,7 +507,7 @@ define <4 x i32> @f32to4ui(<4 x float> %
; NOVL-NEXT: retq
;
; VL-LABEL: f32to4ui:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvttps2udq %xmm0, %xmm0
; VL-NEXT: retq
%b = fptoui <4 x float> %a to <4 x i32>
@@ -516,7 +516,7 @@ define <4 x i32> @f32to4ui(<4 x float> %
define <8 x i32> @f64to8ui(<8 x double> %a) nounwind {
; ALL-LABEL: f64to8ui:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttpd2udq %zmm0, %ymm0
; ALL-NEXT: retq
%b = fptoui <8 x double> %a to <8 x i32>
@@ -525,7 +525,7 @@ define <8 x i32> @f64to8ui(<8 x double>
define <8 x i16> @f64to8us(<8 x double> %f) {
; NOVL-LABEL: f64to8us:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -533,7 +533,7 @@ define <8 x i16> @f64to8us(<8 x double>
; NOVL-NEXT: retq
;
; VL-LABEL: f64to8us:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvttpd2dq %zmm0, %ymm0
; VL-NEXT: vpmovdw %ymm0, %xmm0
; VL-NEXT: vzeroupper
@@ -544,7 +544,7 @@ define <8 x i16> @f64to8us(<8 x double>
define <8 x i8> @f64to8uc(<8 x double> %f) {
; NOVL-LABEL: f64to8uc:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -552,7 +552,7 @@ define <8 x i8> @f64to8uc(<8 x double> %
; NOVL-NEXT: retq
;
; VL-LABEL: f64to8uc:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvttpd2dq %zmm0, %ymm0
; VL-NEXT: vpmovdw %ymm0, %xmm0
; VL-NEXT: vzeroupper
@@ -563,7 +563,7 @@ define <8 x i8> @f64to8uc(<8 x double> %
define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
; NOVL-LABEL: f64to4ui:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vcvttpd2udq %zmm0, %ymm0
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
@@ -571,7 +571,7 @@ define <4 x i32> @f64to4ui(<4 x double>
; NOVL-NEXT: retq
;
; VL-LABEL: f64to4ui:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvttpd2udq %ymm0, %xmm0
; VL-NEXT: vzeroupper
; VL-NEXT: retq
@@ -581,7 +581,7 @@ define <4 x i32> @f64to4ui(<4 x double>
define <8 x double> @sito8f64(<8 x i32> %a) {
; ALL-LABEL: sito8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm0
; ALL-NEXT: retq
%b = sitofp <8 x i32> %a to <8 x double>
@@ -589,31 +589,31 @@ define <8 x double> @sito8f64(<8 x i32>
}
define <8 x double> @i32to8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
; KNL-LABEL: i32to8f64_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; VLBW-LABEL: i32to8f64_mask:
-; VLBW: # BB#0:
+; VLBW: # %bb.0:
; VLBW-NEXT: kmovd %edi, %k1
; VLBW-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; VLBW-NEXT: retq
;
; VLNOBW-LABEL: i32to8f64_mask:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; VLNOBW-NEXT: retq
;
; AVX512DQ-LABEL: i32to8f64_mask:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k1
; AVX512DQ-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: i32to8f64_mask:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vcvtdq2pd %ymm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
@@ -624,31 +624,31 @@ define <8 x double> @i32to8f64_mask(<8 x
}
define <8 x double> @sito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
; KNL-LABEL: sito8f64_maskz:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; VLBW-LABEL: sito8f64_maskz:
-; VLBW: # BB#0:
+; VLBW: # %bb.0:
; VLBW-NEXT: kmovd %edi, %k1
; VLBW-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; VLBW-NEXT: retq
;
; VLNOBW-LABEL: sito8f64_maskz:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; VLNOBW-NEXT: retq
;
; AVX512DQ-LABEL: sito8f64_maskz:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k1
; AVX512DQ-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: sito8f64_maskz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
@@ -660,7 +660,7 @@ define <8 x double> @sito8f64_maskz(<8 x
define <8 x i32> @f64to8si(<8 x double> %a) {
; ALL-LABEL: f64to8si:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttpd2dq %zmm0, %ymm0
; ALL-NEXT: retq
%b = fptosi <8 x double> %a to <8 x i32>
@@ -669,7 +669,7 @@ define <8 x i32> @f64to8si(<8 x double>
define <4 x i32> @f64to4si(<4 x double> %a) {
; ALL-LABEL: f64to4si:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttpd2dq %ymm0, %xmm0
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -679,7 +679,7 @@ define <4 x i32> @f64to4si(<4 x double>
define <16 x float> @f64to16f32(<16 x double> %b) nounwind {
; ALL-LABEL: f64to16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtpd2ps %zmm0, %ymm0
; ALL-NEXT: vcvtpd2ps %zmm1, %ymm1
; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -690,7 +690,7 @@ define <16 x float> @f64to16f32(<16 x do
define <4 x float> @f64to4f32(<4 x double> %b) {
; ALL-LABEL: f64to4f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtpd2ps %ymm0, %xmm0
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
@@ -700,7 +700,7 @@ define <4 x float> @f64to4f32(<4 x doubl
define <4 x float> @f64to4f32_mask(<4 x double> %b, <4 x i1> %mask) {
; NOVL-LABEL: f64to4f32_mask:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpslld $31, %xmm1, %xmm1
; NOVL-NEXT: vpsrad $31, %xmm1, %xmm1
; NOVL-NEXT: vcvtpd2ps %ymm0, %xmm0
@@ -709,7 +709,7 @@ define <4 x float> @f64to4f32_mask(<4 x
; NOVL-NEXT: retq
;
; VL-LABEL: f64to4f32_mask:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpslld $31, %xmm1, %xmm1
; VL-NEXT: vptestmd %xmm1, %xmm1, %k1
; VL-NEXT: vcvtpd2ps %ymm0, %xmm0 {%k1} {z}
@@ -722,7 +722,7 @@ define <4 x float> @f64to4f32_mask(<4 x
define <4 x float> @f64tof32_inreg(<2 x double> %a0, <4 x float> %a1) nounwind {
; ALL-LABEL: f64tof32_inreg:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtsd2ss %xmm0, %xmm1, %xmm0
; ALL-NEXT: retq
%ext = extractelement <2 x double> %a0, i32 0
@@ -733,7 +733,7 @@ define <4 x float> @f64tof32_inreg(<2 x
define <8 x double> @f32to8f64(<8 x float> %b) nounwind {
; ALL-LABEL: f32to8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtps2pd %ymm0, %zmm0
; ALL-NEXT: retq
%a = fpext <8 x float> %b to <8 x double>
@@ -742,14 +742,14 @@ define <8 x double> @f32to8f64(<8 x floa
define <4 x double> @f32to4f64_mask(<4 x float> %b, <4 x double> %b1, <4 x double> %a1) {
; NOVL-LABEL: f32to4f64_mask:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vcvtps2pd %xmm0, %ymm0
; NOVL-NEXT: vcmpltpd %ymm2, %ymm1, %ymm1
; NOVL-NEXT: vandpd %ymm0, %ymm1, %ymm0
; NOVL-NEXT: retq
;
; VL-LABEL: f32to4f64_mask:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcmpltpd %ymm2, %ymm1, %k1
; VL-NEXT: vcvtps2pd %xmm0, %ymm0 {%k1} {z}
; VL-NEXT: retq
@@ -761,7 +761,7 @@ define <4 x double> @f32to4f64_mask(<4 x
define <2 x double> @f32tof64_inreg(<2 x double> %a0, <4 x float> %a1) nounwind {
; ALL-LABEL: f32tof64_inreg:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm0
; ALL-NEXT: retq
%ext = extractelement <4 x float> %a1, i32 0
@@ -772,7 +772,7 @@ define <2 x double> @f32tof64_inreg(<2 x
define double @sltof64_load(i64* nocapture %e) {
; ALL-LABEL: sltof64_load:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vcvtsi2sdq (%rdi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
@@ -783,7 +783,7 @@ entry:
define double @sitof64_load(i32* %e) {
; ALL-LABEL: sitof64_load:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vcvtsi2sdl (%rdi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
@@ -794,7 +794,7 @@ entry:
define float @sitof32_load(i32* %e) {
; ALL-LABEL: sitof32_load:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vcvtsi2ssl (%rdi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
@@ -805,7 +805,7 @@ entry:
define float @sltof32_load(i64* %e) {
; ALL-LABEL: sltof32_load:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vcvtsi2ssq (%rdi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
@@ -816,7 +816,7 @@ entry:
define void @f32tof64_loadstore() {
; ALL-LABEL: f32tof64_loadstore:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp)
@@ -832,7 +832,7 @@ entry:
define void @f64tof32_loadstore() nounwind uwtable {
; ALL-LABEL: f64tof32_loadstore:
-; ALL: # BB#0: # %entry
+; ALL: # %bb.0: # %entry
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; ALL-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp)
@@ -848,7 +848,7 @@ entry:
define double @long_to_double(i64 %x) {
; ALL-LABEL: long_to_double:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovq %rdi, %xmm0
; ALL-NEXT: retq
%res = bitcast i64 %x to double
@@ -857,7 +857,7 @@ define double @long_to_double(i64 %x) {
define i64 @double_to_long(double %x) {
; ALL-LABEL: double_to_long:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovq %xmm0, %rax
; ALL-NEXT: retq
%res = bitcast double %x to i64
@@ -866,7 +866,7 @@ define i64 @double_to_long(double %x) {
define float @int_to_float(i32 %x) {
; ALL-LABEL: int_to_float:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovd %edi, %xmm0
; ALL-NEXT: retq
%res = bitcast i32 %x to float
@@ -875,7 +875,7 @@ define float @int_to_float(i32 %x) {
define i32 @float_to_int(float %x) {
; ALL-LABEL: float_to_int:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: retq
%res = bitcast float %x to i32
@@ -884,7 +884,7 @@ define i32 @float_to_int(float %x) {
define <16 x double> @uito16f64(<16 x i32> %a) nounwind {
; ALL-LABEL: uito16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtudq2pd %ymm0, %zmm2
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; ALL-NEXT: vcvtudq2pd %ymm0, %zmm1
@@ -896,7 +896,7 @@ define <16 x double> @uito16f64(<16 x i3
define <8 x float> @slto8f32(<8 x i64> %a) {
; NODQ-LABEL: slto8f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm2
@@ -926,7 +926,7 @@ define <8 x float> @slto8f32(<8 x i64> %
; NODQ-NEXT: retq
;
; DQ-LABEL: slto8f32:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; DQ-NEXT: retq
%b = sitofp <8 x i64> %a to <8 x float>
@@ -935,7 +935,7 @@ define <8 x float> @slto8f32(<8 x i64> %
define <16 x float> @slto16f32(<16 x i64> %a) {
; NODQ-LABEL: slto16f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $2, %zmm1, %xmm2
; NODQ-NEXT: vpextrq $1, %xmm2, %rax
; NODQ-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
@@ -992,7 +992,7 @@ define <16 x float> @slto16f32(<16 x i64
; NODQ-NEXT: retq
;
; DQ-LABEL: slto16f32:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; DQ-NEXT: vcvtqq2ps %zmm1, %ymm1
; DQ-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1003,7 +1003,7 @@ define <16 x float> @slto16f32(<16 x i64
define <8 x double> @slto8f64(<8 x i64> %a) {
; NODQ-LABEL: slto8f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtsi2sdq %rax, %xmm2, %xmm2
@@ -1033,7 +1033,7 @@ define <8 x double> @slto8f64(<8 x i64>
; NODQ-NEXT: retq
;
; DQ-LABEL: slto8f64:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; DQ-NEXT: retq
%b = sitofp <8 x i64> %a to <8 x double>
@@ -1042,7 +1042,7 @@ define <8 x double> @slto8f64(<8 x i64>
define <16 x double> @slto16f64(<16 x i64> %a) {
; NODQ-LABEL: slto16f64:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $3, %zmm0, %xmm2
; NODQ-NEXT: vpextrq $1, %xmm2, %rax
; NODQ-NEXT: vcvtsi2sdq %rax, %xmm3, %xmm3
@@ -1098,7 +1098,7 @@ define <16 x double> @slto16f64(<16 x i6
; NODQ-NEXT: retq
;
; DQ-LABEL: slto16f64:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; DQ-NEXT: vcvtqq2pd %zmm1, %zmm1
; DQ-NEXT: retq
@@ -1108,7 +1108,7 @@ define <16 x double> @slto16f64(<16 x i6
define <8 x float> @ulto8f32(<8 x i64> %a) {
; NODQ-LABEL: ulto8f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; NODQ-NEXT: vpextrq $1, %xmm1, %rax
; NODQ-NEXT: vcvtusi2ssq %rax, %xmm2, %xmm2
@@ -1138,7 +1138,7 @@ define <8 x float> @ulto8f32(<8 x i64> %
; NODQ-NEXT: retq
;
; DQ-LABEL: ulto8f32:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; DQ-NEXT: retq
%b = uitofp <8 x i64> %a to <8 x float>
@@ -1147,7 +1147,7 @@ define <8 x float> @ulto8f32(<8 x i64> %
define <16 x float> @ulto16f32(<16 x i64> %a) {
; NODQ-LABEL: ulto16f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vextracti32x4 $2, %zmm1, %xmm2
; NODQ-NEXT: vpextrq $1, %xmm2, %rax
; NODQ-NEXT: vcvtusi2ssq %rax, %xmm3, %xmm3
@@ -1204,7 +1204,7 @@ define <16 x float> @ulto16f32(<16 x i64
; NODQ-NEXT: retq
;
; DQ-LABEL: ulto16f32:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; DQ-NEXT: vcvtuqq2ps %zmm1, %ymm1
; DQ-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
@@ -1215,31 +1215,31 @@ define <16 x float> @ulto16f32(<16 x i64
define <8 x double> @uito8f64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
; KNL-LABEL: uito8f64_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; VLBW-LABEL: uito8f64_mask:
-; VLBW: # BB#0:
+; VLBW: # %bb.0:
; VLBW-NEXT: kmovd %edi, %k1
; VLBW-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; VLBW-NEXT: retq
;
; VLNOBW-LABEL: uito8f64_mask:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; VLNOBW-NEXT: retq
;
; AVX512DQ-LABEL: uito8f64_mask:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k1
; AVX512DQ-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: uito8f64_mask:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vcvtudq2pd %ymm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
@@ -1250,31 +1250,31 @@ define <8 x double> @uito8f64_mask(<8 x
}
define <8 x double> @uito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
; KNL-LABEL: uito8f64_maskz:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; VLBW-LABEL: uito8f64_maskz:
-; VLBW: # BB#0:
+; VLBW: # %bb.0:
; VLBW-NEXT: kmovd %edi, %k1
; VLBW-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
; VLBW-NEXT: retq
;
; VLNOBW-LABEL: uito8f64_maskz:
-; VLNOBW: # BB#0:
+; VLNOBW: # %bb.0:
; VLNOBW-NEXT: kmovw %edi, %k1
; VLNOBW-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
; VLNOBW-NEXT: retq
;
; AVX512DQ-LABEL: uito8f64_maskz:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: kmovw %edi, %k1
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: uito8f64_maskz:
-; AVX512BW: # BB#0:
+; AVX512BW: # %bb.0:
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: retq
@@ -1286,14 +1286,14 @@ define <8 x double> @uito8f64_maskz(<8 x
define <4 x double> @uito4f64(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NOVL-NEXT: retq
;
; VL-LABEL: uito4f64:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvtudq2pd %xmm0, %ymm0
; VL-NEXT: retq
%b = uitofp <4 x i32> %a to <4 x double>
@@ -1302,7 +1302,7 @@ define <4 x double> @uito4f64(<4 x i32>
define <16 x float> @uito16f32(<16 x i32> %a) nounwind {
; ALL-LABEL: uito16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtudq2ps %zmm0, %zmm0
; ALL-NEXT: retq
%b = uitofp <16 x i32> %a to <16 x float>
@@ -1311,7 +1311,7 @@ define <16 x float> @uito16f32(<16 x i32
define <8 x double> @uito8f64(<8 x i32> %a) {
; ALL-LABEL: uito8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtudq2pd %ymm0, %zmm0
; ALL-NEXT: retq
%b = uitofp <8 x i32> %a to <8 x double>
@@ -1320,14 +1320,14 @@ define <8 x double> @uito8f64(<8 x i32>
define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
; NOVL-LABEL: uito8f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NOVL-NEXT: retq
;
; VL-LABEL: uito8f32:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvtudq2ps %ymm0, %ymm0
; VL-NEXT: retq
%b = uitofp <8 x i32> %a to <8 x float>
@@ -1336,7 +1336,7 @@ define <8 x float> @uito8f32(<8 x i32> %
define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
@@ -1344,7 +1344,7 @@ define <4 x float> @uito4f32(<4 x i32> %
; NOVL-NEXT: retq
;
; VL-LABEL: uito4f32:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vcvtudq2ps %xmm0, %xmm0
; VL-NEXT: retq
%b = uitofp <4 x i32> %a to <4 x float>
@@ -1353,7 +1353,7 @@ define <4 x float> @uito4f32(<4 x i32> %
define i32 @fptosi(float %a) nounwind {
; ALL-LABEL: fptosi:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttss2si %xmm0, %eax
; ALL-NEXT: retq
%b = fptosi float %a to i32
@@ -1362,7 +1362,7 @@ define i32 @fptosi(float %a) nounwind {
define i32 @fptoui(float %a) nounwind {
; ALL-LABEL: fptoui:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvttss2usi %xmm0, %eax
; ALL-NEXT: retq
%b = fptoui float %a to i32
@@ -1371,7 +1371,7 @@ define i32 @fptoui(float %a) nounwind {
define float @uitof32(i32 %a) nounwind {
; ALL-LABEL: uitof32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtusi2ssl %edi, %xmm0, %xmm0
; ALL-NEXT: retq
%b = uitofp i32 %a to float
@@ -1380,7 +1380,7 @@ define float @uitof32(i32 %a) nounwind {
define double @uitof64(i32 %a) nounwind {
; ALL-LABEL: uitof64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtusi2sdl %edi, %xmm0, %xmm0
; ALL-NEXT: retq
%b = uitofp i32 %a to double
@@ -1389,7 +1389,7 @@ define double @uitof64(i32 %a) nounwind
define <16 x float> @sbto16f32(<16 x i32> %a) {
; NODQ-LABEL: sbto16f32:
-; NODQ: # BB#0:
+; NODQ: # %bb.0:
; NODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NODQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NODQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1397,7 +1397,7 @@ define <16 x float> @sbto16f32(<16 x i32
; NODQ-NEXT: retq
;
; DQ-LABEL: sbto16f32:
-; DQ: # BB#0:
+; DQ: # %bb.0:
; DQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; DQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k0
; DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1410,7 +1410,7 @@ define <16 x float> @sbto16f32(<16 x i32
define <16 x float> @scto16f32(<16 x i8> %a) {
; ALL-LABEL: scto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbd %xmm0, %zmm0
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1420,7 +1420,7 @@ define <16 x float> @scto16f32(<16 x i8>
define <16 x float> @ssto16f32(<16 x i16> %a) {
; ALL-LABEL: ssto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd %ymm0, %zmm0
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1430,7 +1430,7 @@ define <16 x float> @ssto16f32(<16 x i16
define <8 x double> @ssto16f64(<8 x i16> %a) {
; ALL-LABEL: ssto16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd %xmm0, %ymm0
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm0
; ALL-NEXT: retq
@@ -1440,7 +1440,7 @@ define <8 x double> @ssto16f64(<8 x i16>
define <8 x double> @scto8f64(<8 x i8> %a) {
; ALL-LABEL: scto8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; ALL-NEXT: vpslld $24, %ymm0, %ymm0
; ALL-NEXT: vpsrad $24, %ymm0, %ymm0
@@ -1452,7 +1452,7 @@ define <8 x double> @scto8f64(<8 x i8> %
define <16 x double> @scto16f64(<16 x i8> %a) {
; ALL-LABEL: scto16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbd %xmm0, %zmm1
; ALL-NEXT: vcvtdq2pd %ymm1, %zmm0
; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
@@ -1464,7 +1464,7 @@ define <16 x double> @scto16f64(<16 x i8
define <16 x double> @sbto16f64(<16 x double> %a) {
; NOVLDQ-LABEL: sbto16f64:
-; NOVLDQ: # BB#0:
+; NOVLDQ: # %bb.0:
; NOVLDQ-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; NOVLDQ-NEXT: vcmpltpd %zmm1, %zmm2, %k1
; NOVLDQ-NEXT: vcmpltpd %zmm0, %zmm2, %k2
@@ -1477,7 +1477,7 @@ define <16 x double> @sbto16f64(<16 x do
; NOVLDQ-NEXT: retq
;
; VLDQ-LABEL: sbto16f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; VLDQ-NEXT: vcmpltpd %zmm1, %zmm2, %k0
; VLDQ-NEXT: vcmpltpd %zmm0, %zmm2, %k1
@@ -1488,7 +1488,7 @@ define <16 x double> @sbto16f64(<16 x do
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto16f64:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm2, %xmm2, %xmm2
; VLNODQ-NEXT: vcmpltpd %zmm1, %zmm2, %k1
; VLNODQ-NEXT: vcmpltpd %zmm0, %zmm2, %k2
@@ -1500,7 +1500,7 @@ define <16 x double> @sbto16f64(<16 x do
; VLNODQ-NEXT: retq
;
; AVX512DQ-LABEL: sbto16f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512DQ-NEXT: vcmpltpd %zmm1, %zmm2, %k0
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm2, %k1
@@ -1516,7 +1516,7 @@ define <16 x double> @sbto16f64(<16 x do
define <8 x double> @sbto8f64(<8 x double> %a) {
; NOVLDQ-LABEL: sbto8f64:
-; NOVLDQ: # BB#0:
+; NOVLDQ: # %bb.0:
; NOVLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1525,7 +1525,7 @@ define <8 x double> @sbto8f64(<8 x doubl
; NOVLDQ-NEXT: retq
;
; VLDQ-LABEL: sbto8f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; VLDQ-NEXT: vpmovm2d %k0, %ymm0
@@ -1533,7 +1533,7 @@ define <8 x double> @sbto8f64(<8 x doubl
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto8f64:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; VLNODQ-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -1542,7 +1542,7 @@ define <8 x double> @sbto8f64(<8 x doubl
; VLNODQ-NEXT: retq
;
; AVX512DQ-LABEL: sbto8f64:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1555,7 +1555,7 @@ define <8 x double> @sbto8f64(<8 x doubl
define <8 x float> @sbto8f32(<8 x float> %a) {
; NOVLDQ-LABEL: sbto8f32:
-; NOVLDQ: # BB#0:
+; NOVLDQ: # %bb.0:
; NOVLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1
@@ -1565,7 +1565,7 @@ define <8 x float> @sbto8f32(<8 x float>
; NOVLDQ-NEXT: retq
;
; VLDQ-LABEL: sbto8f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltps %ymm0, %ymm1, %k0
; VLDQ-NEXT: vpmovm2d %k0, %ymm0
@@ -1573,7 +1573,7 @@ define <8 x float> @sbto8f32(<8 x float>
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto8f32:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltps %ymm0, %ymm1, %k1
; VLNODQ-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
@@ -1582,7 +1582,7 @@ define <8 x float> @sbto8f32(<8 x float>
; VLNODQ-NEXT: retq
;
; AVX512DQ-LABEL: sbto8f32:
-; AVX512DQ: # BB#0:
+; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
@@ -1596,14 +1596,14 @@ define <8 x float> @sbto8f32(<8 x float>
define <4 x float> @sbto4f32(<4 x float> %a) {
; NOVL-LABEL: sbto4f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; NOVL-NEXT: vcvtdq2ps %xmm0, %xmm0
; NOVL-NEXT: retq
;
; VLDQ-LABEL: sbto4f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltps %xmm0, %xmm1, %k0
; VLDQ-NEXT: vpmovm2d %k0, %xmm0
@@ -1611,7 +1611,7 @@ define <4 x float> @sbto4f32(<4 x float>
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto4f32:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltps %xmm0, %xmm1, %k1
; VLNODQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1625,7 +1625,7 @@ define <4 x float> @sbto4f32(<4 x float>
define <4 x double> @sbto4f64(<4 x double> %a) {
; NOVL-LABEL: sbto4f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; NOVL-NEXT: vpmovqd %zmm0, %ymm0
@@ -1633,7 +1633,7 @@ define <4 x double> @sbto4f64(<4 x doubl
; NOVL-NEXT: retq
;
; VLDQ-LABEL: sbto4f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltpd %ymm0, %ymm1, %k0
; VLDQ-NEXT: vpmovm2d %k0, %xmm0
@@ -1641,7 +1641,7 @@ define <4 x double> @sbto4f64(<4 x doubl
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto4f64:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltpd %ymm0, %ymm1, %k1
; VLNODQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1655,14 +1655,14 @@ define <4 x double> @sbto4f64(<4 x doubl
define <2 x float> @sbto2f32(<2 x float> %a) {
; NOVL-LABEL: sbto2f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; NOVL-NEXT: vcvtdq2ps %xmm0, %xmm0
; NOVL-NEXT: retq
;
; VLDQ-LABEL: sbto2f32:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltps %xmm0, %xmm1, %k0
; VLDQ-NEXT: vpmovm2d %k0, %xmm0
@@ -1670,7 +1670,7 @@ define <2 x float> @sbto2f32(<2 x float>
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto2f32:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltps %xmm0, %xmm1, %k1
; VLNODQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1684,7 +1684,7 @@ define <2 x float> @sbto2f32(<2 x float>
define <2 x double> @sbto2f64(<2 x double> %a) {
; NOVL-LABEL: sbto2f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; NOVL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1692,7 +1692,7 @@ define <2 x double> @sbto2f64(<2 x doubl
; NOVL-NEXT: retq
;
; VLDQ-LABEL: sbto2f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vcmpltpd %xmm0, %xmm1, %k0
; VLDQ-NEXT: vpmovm2q %k0, %xmm0
@@ -1700,7 +1700,7 @@ define <2 x double> @sbto2f64(<2 x doubl
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: sbto2f64:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vcmpltpd %xmm0, %xmm1, %k1
; VLNODQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1718,7 +1718,7 @@ define <2 x double> @sbto2f64(<2 x doubl
define <16 x float> @ucto16f32(<16 x i8> %a) {
; ALL-LABEL: ucto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1728,7 +1728,7 @@ define <16 x float> @ucto16f32(<16 x i8>
define <8 x double> @ucto8f64(<8 x i8> %a) {
; ALL-LABEL: ucto8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; ALL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm0
@@ -1739,7 +1739,7 @@ define <8 x double> @ucto8f64(<8 x i8> %
define <16 x float> @swto16f32(<16 x i16> %a) {
; ALL-LABEL: swto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd %ymm0, %zmm0
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1749,7 +1749,7 @@ define <16 x float> @swto16f32(<16 x i16
define <8 x double> @swto8f64(<8 x i16> %a) {
; ALL-LABEL: swto8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd %xmm0, %ymm0
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm0
; ALL-NEXT: retq
@@ -1759,7 +1759,7 @@ define <8 x double> @swto8f64(<8 x i16>
define <16 x double> @swto16f64(<16 x i16> %a) {
; ALL-LABEL: swto16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd %ymm0, %zmm1
; ALL-NEXT: vcvtdq2pd %ymm1, %zmm0
; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
@@ -1771,7 +1771,7 @@ define <16 x double> @swto16f64(<16 x i1
define <16 x double> @ucto16f64(<16 x i8> %a) {
; ALL-LABEL: ucto16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; ALL-NEXT: vcvtdq2pd %ymm1, %zmm0
; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
@@ -1783,7 +1783,7 @@ define <16 x double> @ucto16f64(<16 x i8
define <16 x float> @uwto16f32(<16 x i16> %a) {
; ALL-LABEL: uwto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1793,7 +1793,7 @@ define <16 x float> @uwto16f32(<16 x i16
define <8 x double> @uwto8f64(<8 x i16> %a) {
; ALL-LABEL: uwto8f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm0
; ALL-NEXT: retq
@@ -1803,7 +1803,7 @@ define <8 x double> @uwto8f64(<8 x i16>
define <16 x double> @uwto16f64(<16 x i16> %a) {
; ALL-LABEL: uwto16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; ALL-NEXT: vcvtdq2pd %ymm1, %zmm0
; ALL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
@@ -1815,7 +1815,7 @@ define <16 x double> @uwto16f64(<16 x i1
define <16 x float> @sito16f32(<16 x i32> %a) {
; ALL-LABEL: sito16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
%b = sitofp <16 x i32> %a to <16 x float>
@@ -1824,7 +1824,7 @@ define <16 x float> @sito16f32(<16 x i32
define <16 x double> @sito16f64(<16 x i32> %a) {
; ALL-LABEL: sito16f64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm2
; ALL-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; ALL-NEXT: vcvtdq2pd %ymm0, %zmm1
@@ -1836,7 +1836,7 @@ define <16 x double> @sito16f64(<16 x i3
define <16 x float> @usto16f32(<16 x i16> %a) {
; ALL-LABEL: usto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; ALL-NEXT: vcvtdq2ps %zmm0, %zmm0
; ALL-NEXT: retq
@@ -1846,7 +1846,7 @@ define <16 x float> @usto16f32(<16 x i16
define <16 x float> @ubto16f32(<16 x i32> %a) {
; ALL-LABEL: ubto16f32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; ALL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; ALL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -1859,7 +1859,7 @@ define <16 x float> @ubto16f32(<16 x i32
define <16 x double> @ubto16f64(<16 x i32> %a) {
; NOVL-LABEL: ubto16f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVL-NEXT: movq {{.*}}(%rip), %rax
@@ -1873,7 +1873,7 @@ define <16 x double> @ubto16f64(<16 x i3
; NOVL-NEXT: retq
;
; VL-LABEL: ubto16f64:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; VL-NEXT: movl {{.*}}(%rip), %eax
@@ -1890,7 +1890,7 @@ define <16 x double> @ubto16f64(<16 x i3
define <8 x float> @ubto8f32(<8 x i32> %a) {
; NOVL-LABEL: ubto8f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
@@ -1901,7 +1901,7 @@ define <8 x float> @ubto8f32(<8 x i32> %
; NOVL-NEXT: retq
;
; VL-LABEL: ubto8f32:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpcmpgtd %ymm0, %ymm1, %k1
; VL-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
@@ -1914,7 +1914,7 @@ define <8 x float> @ubto8f32(<8 x i32> %
define <8 x double> @ubto8f64(<8 x i32> %a) {
; NOVL-LABEL: ubto8f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
@@ -1924,7 +1924,7 @@ define <8 x double> @ubto8f64(<8 x i32>
; NOVL-NEXT: retq
;
; VL-LABEL: ubto8f64:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpcmpgtd %ymm0, %ymm1, %k1
; VL-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
@@ -1937,7 +1937,7 @@ define <8 x double> @ubto8f64(<8 x i32>
define <4 x float> @ubto4f32(<4 x i32> %a) {
; NOVL-LABEL: ubto4f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NOVL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
@@ -1945,7 +1945,7 @@ define <4 x float> @ubto4f32(<4 x i32> %
; NOVL-NEXT: retq
;
; VL-LABEL: ubto4f32:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; VL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
@@ -1958,7 +1958,7 @@ define <4 x float> @ubto4f32(<4 x i32> %
define <4 x double> @ubto4f64(<4 x i32> %a) {
; NOVL-LABEL: ubto4f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
; NOVL-NEXT: vpsrld $31, %xmm0, %xmm0
@@ -1966,7 +1966,7 @@ define <4 x double> @ubto4f64(<4 x i32>
; NOVL-NEXT: retq
;
; VL-LABEL: ubto4f64:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; VL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
@@ -1979,7 +1979,7 @@ define <4 x double> @ubto4f64(<4 x i32>
define <2 x float> @ubto2f32(<2 x i32> %a) {
; NOVL-LABEL: ubto2f32:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; NOVL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
@@ -1993,7 +1993,7 @@ define <2 x float> @ubto2f32(<2 x i32> %
; NOVL-NEXT: retq
;
; VL-LABEL: ubto2f32:
-; VL: # BB#0:
+; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; VL-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
@@ -2007,7 +2007,7 @@ define <2 x float> @ubto2f32(<2 x i32> %
define <2 x double> @ubto2f64(<2 x i32> %a) {
; NOVL-LABEL: ubto2f64:
-; NOVL: # BB#0:
+; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; NOVL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
@@ -2015,7 +2015,7 @@ define <2 x double> @ubto2f64(<2 x i32>
; NOVL-NEXT: retq
;
; VLDQ-LABEL: ubto2f64:
-; VLDQ: # BB#0:
+; VLDQ: # %bb.0:
; VLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLDQ-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; VLDQ-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
@@ -2024,7 +2024,7 @@ define <2 x double> @ubto2f64(<2 x i32>
; VLDQ-NEXT: retq
;
; VLNODQ-LABEL: ubto2f64:
-; VLNODQ: # BB#0:
+; VLNODQ: # %bb.0:
; VLNODQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VLNODQ-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; VLNODQ-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
Modified: llvm/trunk/test/CodeGen/X86/avx512-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-ext.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-ext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-ext.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define <8 x i16> @zext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x8mem_to_8x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vpsllw $15, %xmm0, %xmm0
; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
@@ -12,7 +12,7 @@ define <8 x i16> @zext_8x8mem_to_8x16(<8
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x8mem_to_8x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -25,7 +25,7 @@ define <8 x i16> @zext_8x8mem_to_8x16(<8
define <8 x i16> @sext_8x8mem_to_8x16(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x8mem_to_8x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbw (%rdi), %xmm1
; KNL-NEXT: vpsllw $15, %xmm0, %xmm0
; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
@@ -33,7 +33,7 @@ define <8 x i16> @sext_8x8mem_to_8x16(<8
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x8mem_to_8x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxbw (%rdi), %xmm0 {%k1} {z}
@@ -47,7 +47,7 @@ define <8 x i16> @sext_8x8mem_to_8x16(<8
define <16 x i16> @zext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x8mem_to_16x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; KNL-NEXT: vpsllw $15, %ymm0, %ymm0
@@ -56,7 +56,7 @@ define <16 x i16> @zext_16x8mem_to_16x16
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x8mem_to_16x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
@@ -69,7 +69,7 @@ define <16 x i16> @zext_16x8mem_to_16x16
define <16 x i16> @sext_16x8mem_to_16x16(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_16x8mem_to_16x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbw (%rdi), %ymm1
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; KNL-NEXT: vpsllw $15, %ymm0, %ymm0
@@ -78,7 +78,7 @@ define <16 x i16> @sext_16x8mem_to_16x16
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16x8mem_to_16x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovsxbw (%rdi), %ymm0 {%k1} {z}
@@ -91,7 +91,7 @@ define <16 x i16> @sext_16x8mem_to_16x16
define <16 x i16> @zext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
; ALL-LABEL: zext_16x8_to_16x16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; ALL-NEXT: retq
%x = zext <16 x i8> %a to <16 x i16>
@@ -100,7 +100,7 @@ define <16 x i16> @zext_16x8_to_16x16(<1
define <16 x i16> @zext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x8_to_16x16_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; KNL-NEXT: vpsllw $15, %ymm1, %ymm1
@@ -109,7 +109,7 @@ define <16 x i16> @zext_16x8_to_16x16_ma
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x8_to_16x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -121,7 +121,7 @@ define <16 x i16> @zext_16x8_to_16x16_ma
define <16 x i16> @sext_16x8_to_16x16(<16 x i8> %a ) nounwind readnone {
; ALL-LABEL: sext_16x8_to_16x16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbw %xmm0, %ymm0
; ALL-NEXT: retq
%x = sext <16 x i8> %a to <16 x i16>
@@ -130,7 +130,7 @@ define <16 x i16> @sext_16x8_to_16x16(<1
define <16 x i16> @sext_16x8_to_16x16_mask(<16 x i8> %a ,<16 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_16x8_to_16x16_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; KNL-NEXT: vpmovsxbw %xmm0, %ymm0
; KNL-NEXT: vpsllw $15, %ymm1, %ymm1
@@ -139,7 +139,7 @@ define <16 x i16> @sext_16x8_to_16x16_ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16x8_to_16x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovsxbw %xmm0, %ymm0 {%k1} {z}
@@ -151,7 +151,7 @@ define <16 x i16> @sext_16x8_to_16x16_ma
define <32 x i16> @zext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_32x8mem_to_32x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -167,7 +167,7 @@ define <32 x i16> @zext_32x8mem_to_32x16
; KNL-NEXT: retq
;
; SKX-LABEL: zext_32x8mem_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
; SKX-NEXT: vpmovb2m %ymm0, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero,mem[16],zero,mem[17],zero,mem[18],zero,mem[19],zero,mem[20],zero,mem[21],zero,mem[22],zero,mem[23],zero,mem[24],zero,mem[25],zero,mem[26],zero,mem[27],zero,mem[28],zero,mem[29],zero,mem[30],zero,mem[31],zero
@@ -180,7 +180,7 @@ define <32 x i16> @zext_32x8mem_to_32x16
define <32 x i16> @sext_32x8mem_to_32x16(<32 x i8> *%i , <32 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_32x8mem_to_32x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbw 16(%rdi), %ymm1
; KNL-NEXT: vpmovsxbw (%rdi), %ymm2
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -196,7 +196,7 @@ define <32 x i16> @sext_32x8mem_to_32x16
; KNL-NEXT: retq
;
; SKX-LABEL: sext_32x8mem_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
; SKX-NEXT: vpmovb2m %ymm0, %k1
; SKX-NEXT: vpmovsxbw (%rdi), %zmm0 {%k1} {z}
@@ -209,7 +209,7 @@ define <32 x i16> @sext_32x8mem_to_32x16
define <32 x i16> @zext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
; KNL-LABEL: zext_32x8_to_32x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -217,7 +217,7 @@ define <32 x i16> @zext_32x8_to_32x16(<3
; KNL-NEXT: retq
;
; SKX-LABEL: zext_32x8_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; SKX-NEXT: retq
%x = zext <32 x i8> %a to <32 x i16>
@@ -226,7 +226,7 @@ define <32 x i16> @zext_32x8_to_32x16(<3
define <32 x i16> @zext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_32x8_to_32x16_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm2
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -242,7 +242,7 @@ define <32 x i16> @zext_32x8_to_32x16_ma
; KNL-NEXT: retq
;
; SKX-LABEL: zext_32x8_to_32x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm1, %ymm1
; SKX-NEXT: vpmovb2m %ymm1, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
@@ -254,7 +254,7 @@ define <32 x i16> @zext_32x8_to_32x16_ma
define <32 x i16> @sext_32x8_to_32x16(<32 x i8> %a ) nounwind readnone {
; KNL-LABEL: sext_32x8_to_32x16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbw %xmm0, %ymm2
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpmovsxbw %xmm0, %ymm1
@@ -262,7 +262,7 @@ define <32 x i16> @sext_32x8_to_32x16(<3
; KNL-NEXT: retq
;
; SKX-LABEL: sext_32x8_to_32x16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbw %ymm0, %zmm0
; SKX-NEXT: retq
%x = sext <32 x i8> %a to <32 x i16>
@@ -271,7 +271,7 @@ define <32 x i16> @sext_32x8_to_32x16(<3
define <32 x i16> @sext_32x8_to_32x16_mask(<32 x i8> %a ,<32 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_32x8_to_32x16_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm2
; KNL-NEXT: vpmovsxbw %xmm2, %ymm2
; KNL-NEXT: vpmovsxbw %xmm0, %ymm0
@@ -287,7 +287,7 @@ define <32 x i16> @sext_32x8_to_32x16_ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_32x8_to_32x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %ymm1, %ymm1
; SKX-NEXT: vpmovb2m %ymm1, %k1
; SKX-NEXT: vpmovsxbw %ymm0, %zmm0 {%k1} {z}
@@ -299,7 +299,7 @@ define <32 x i16> @sext_32x8_to_32x16_ma
define <4 x i32> @zext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x8mem_to_4x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -307,7 +307,7 @@ define <4 x i32> @zext_4x8mem_to_4x32(<4
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x8mem_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxbd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -320,7 +320,7 @@ define <4 x i32> @zext_4x8mem_to_4x32(<4
define <4 x i32> @sext_4x8mem_to_4x32(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_4x8mem_to_4x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd (%rdi), %xmm1
@@ -328,7 +328,7 @@ define <4 x i32> @sext_4x8mem_to_4x32(<4
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x8mem_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxbd (%rdi), %xmm0 {%k1} {z}
@@ -341,7 +341,7 @@ define <4 x i32> @sext_4x8mem_to_4x32(<4
define <8 x i32> @zext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x8mem_to_8x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -352,7 +352,7 @@ define <8 x i32> @zext_8x8mem_to_8x32(<8
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x8mem_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxbd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -365,7 +365,7 @@ define <8 x i32> @zext_8x8mem_to_8x32(<8
define <8 x i32> @sext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x8mem_to_8x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -376,7 +376,7 @@ define <8 x i32> @sext_8x8mem_to_8x32(<8
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x8mem_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxbd (%rdi), %ymm0 {%k1} {z}
@@ -389,7 +389,7 @@ define <8 x i32> @sext_8x8mem_to_8x32(<8
define <16 x i32> @zext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x8mem_to_16x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -397,7 +397,7 @@ define <16 x i32> @zext_16x8mem_to_16x32
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x8mem_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
@@ -410,7 +410,7 @@ define <16 x i32> @zext_16x8mem_to_16x32
define <16 x i32> @sext_16x8mem_to_16x32(<16 x i8> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_16x8mem_to_16x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -418,7 +418,7 @@ define <16 x i32> @sext_16x8mem_to_16x32
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16x8mem_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovsxbd (%rdi), %zmm0 {%k1} {z}
@@ -431,7 +431,7 @@ define <16 x i32> @sext_16x8mem_to_16x32
define <16 x i32> @zext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x8_to_16x32_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -439,7 +439,7 @@ define <16 x i32> @zext_16x8_to_16x32_ma
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x8_to_16x32_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
@@ -451,7 +451,7 @@ define <16 x i32> @zext_16x8_to_16x32_ma
define <16 x i32> @sext_16x8_to_16x32_mask(<16 x i8> %a , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_16x8_to_16x32_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -459,7 +459,7 @@ define <16 x i32> @sext_16x8_to_16x32_ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16x8_to_16x32_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovsxbd %xmm0, %zmm0 {%k1} {z}
@@ -471,7 +471,7 @@ define <16 x i32> @sext_16x8_to_16x32_ma
define <16 x i32> @zext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
; ALL-LABEL: zext_16x8_to_16x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; ALL-NEXT: retq
%x = zext <16 x i8> %i to <16 x i32>
@@ -480,7 +480,7 @@ define <16 x i32> @zext_16x8_to_16x32(<1
define <16 x i32> @sext_16x8_to_16x32(<16 x i8> %i) nounwind readnone {
; ALL-LABEL: sext_16x8_to_16x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbd %xmm0, %zmm0
; ALL-NEXT: retq
%x = sext <16 x i8> %i to <16 x i32>
@@ -489,7 +489,7 @@ define <16 x i32> @sext_16x8_to_16x32(<1
define <2 x i64> @zext_2x8mem_to_2x64(<2 x i8> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_2x8mem_to_2x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
@@ -497,7 +497,7 @@ define <2 x i64> @zext_2x8mem_to_2x64(<2
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x8mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxbq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
@@ -509,7 +509,7 @@ define <2 x i64> @zext_2x8mem_to_2x64(<2
}
define <2 x i64> @sext_2x8mem_to_2x64mask(<2 x i8> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_2x8mem_to_2x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovsxbq (%rdi), %xmm1
@@ -517,7 +517,7 @@ define <2 x i64> @sext_2x8mem_to_2x64mas
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x8mem_to_2x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxbq (%rdi), %xmm0 {%k1} {z}
@@ -529,7 +529,7 @@ define <2 x i64> @sext_2x8mem_to_2x64mas
}
define <2 x i64> @sext_2x8mem_to_2x64(<2 x i8> *%i) nounwind readnone {
; ALL-LABEL: sext_2x8mem_to_2x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbq (%rdi), %xmm0
; ALL-NEXT: retq
%a = load <2 x i8>,<2 x i8> *%i,align 1
@@ -539,7 +539,7 @@ define <2 x i64> @sext_2x8mem_to_2x64(<2
define <4 x i64> @zext_4x8mem_to_4x64(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x8mem_to_4x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -548,7 +548,7 @@ define <4 x i64> @zext_4x8mem_to_4x64(<4
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x8mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxbq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
@@ -561,7 +561,7 @@ define <4 x i64> @zext_4x8mem_to_4x64(<4
define <4 x i64> @sext_4x8mem_to_4x64mask(<4 x i8> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_4x8mem_to_4x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovsxdq %xmm0, %ymm0
@@ -570,7 +570,7 @@ define <4 x i64> @sext_4x8mem_to_4x64mas
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x8mem_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxbq (%rdi), %ymm0 {%k1} {z}
@@ -583,7 +583,7 @@ define <4 x i64> @sext_4x8mem_to_4x64mas
define <4 x i64> @sext_4x8mem_to_4x64(<4 x i8> *%i) nounwind readnone {
; ALL-LABEL: sext_4x8mem_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbq (%rdi), %ymm0
; ALL-NEXT: retq
%a = load <4 x i8>,<4 x i8> *%i,align 1
@@ -593,7 +593,7 @@ define <4 x i64> @sext_4x8mem_to_4x64(<4
define <8 x i64> @zext_8x8mem_to_8x64(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x8mem_to_8x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -601,7 +601,7 @@ define <8 x i64> @zext_8x8mem_to_8x64(<8
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x8mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxbq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
@@ -614,7 +614,7 @@ define <8 x i64> @zext_8x8mem_to_8x64(<8
define <8 x i64> @sext_8x8mem_to_8x64mask(<8 x i8> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x8mem_to_8x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -622,7 +622,7 @@ define <8 x i64> @sext_8x8mem_to_8x64mas
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x8mem_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxbq (%rdi), %zmm0 {%k1} {z}
@@ -635,7 +635,7 @@ define <8 x i64> @sext_8x8mem_to_8x64mas
define <8 x i64> @sext_8x8mem_to_8x64(<8 x i8> *%i) nounwind readnone {
; ALL-LABEL: sext_8x8mem_to_8x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxbq (%rdi), %zmm0
; ALL-NEXT: retq
%a = load <8 x i8>,<8 x i8> *%i,align 1
@@ -645,7 +645,7 @@ define <8 x i64> @sext_8x8mem_to_8x64(<8
define <4 x i32> @zext_4x16mem_to_4x32(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x16mem_to_4x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -653,7 +653,7 @@ define <4 x i32> @zext_4x16mem_to_4x32(<
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x16mem_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxwd {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -666,7 +666,7 @@ define <4 x i32> @zext_4x16mem_to_4x32(<
define <4 x i32> @sext_4x16mem_to_4x32mask(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_4x16mem_to_4x32mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovsxwd (%rdi), %xmm1
@@ -674,7 +674,7 @@ define <4 x i32> @sext_4x16mem_to_4x32ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x16mem_to_4x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxwd (%rdi), %xmm0 {%k1} {z}
@@ -687,7 +687,7 @@ define <4 x i32> @sext_4x16mem_to_4x32ma
define <4 x i32> @sext_4x16mem_to_4x32(<4 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_4x16mem_to_4x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd (%rdi), %xmm0
; ALL-NEXT: retq
%a = load <4 x i16>,<4 x i16> *%i,align 1
@@ -698,7 +698,7 @@ define <4 x i32> @sext_4x16mem_to_4x32(<
define <8 x i32> @zext_8x16mem_to_8x32(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x16mem_to_8x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -709,7 +709,7 @@ define <8 x i32> @zext_8x16mem_to_8x32(<
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16mem_to_8x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -722,7 +722,7 @@ define <8 x i32> @zext_8x16mem_to_8x32(<
define <8 x i32> @sext_8x16mem_to_8x32mask(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x16mem_to_8x32mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -733,7 +733,7 @@ define <8 x i32> @sext_8x16mem_to_8x32ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x16mem_to_8x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxwd (%rdi), %ymm0 {%k1} {z}
@@ -746,7 +746,7 @@ define <8 x i32> @sext_8x16mem_to_8x32ma
define <8 x i32> @sext_8x16mem_to_8x32(<8 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_8x16mem_to_8x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd (%rdi), %ymm0
; ALL-NEXT: retq
%a = load <8 x i16>,<8 x i16> *%i,align 1
@@ -756,7 +756,7 @@ define <8 x i32> @sext_8x16mem_to_8x32(<
define <8 x i32> @zext_8x16_to_8x32mask(<8 x i16> %a , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x16_to_8x32mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -767,7 +767,7 @@ define <8 x i32> @zext_8x16_to_8x32mask(
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16_to_8x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vpmovzxwd {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
@@ -779,7 +779,7 @@ define <8 x i32> @zext_8x16_to_8x32mask(
define <8 x i32> @zext_8x16_to_8x32(<8 x i16> %a ) nounwind readnone {
; ALL-LABEL: zext_8x16_to_8x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; ALL-NEXT: retq
%x = zext <8 x i16> %a to <8 x i32>
@@ -788,7 +788,7 @@ define <8 x i32> @zext_8x16_to_8x32(<8 x
define <16 x i32> @zext_16x16mem_to_16x32(<16 x i16> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x16mem_to_16x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -796,7 +796,7 @@ define <16 x i32> @zext_16x16mem_to_16x3
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x16mem_to_16x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
@@ -809,7 +809,7 @@ define <16 x i32> @zext_16x16mem_to_16x3
define <16 x i32> @sext_16x16mem_to_16x32mask(<16 x i16> *%i , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_16x16mem_to_16x32mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
@@ -817,7 +817,7 @@ define <16 x i32> @sext_16x16mem_to_16x3
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16x16mem_to_16x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k1
; SKX-NEXT: vpmovsxwd (%rdi), %zmm0 {%k1} {z}
@@ -830,7 +830,7 @@ define <16 x i32> @sext_16x16mem_to_16x3
define <16 x i32> @sext_16x16mem_to_16x32(<16 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_16x16mem_to_16x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwd (%rdi), %zmm0
; ALL-NEXT: retq
%a = load <16 x i16>,<16 x i16> *%i,align 1
@@ -839,7 +839,7 @@ define <16 x i32> @sext_16x16mem_to_16x3
}
define <16 x i32> @zext_16x16_to_16x32mask(<16 x i16> %a , <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_16x16_to_16x32mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
@@ -847,7 +847,7 @@ define <16 x i32> @zext_16x16_to_16x32ma
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16x16_to_16x32mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
@@ -859,7 +859,7 @@ define <16 x i32> @zext_16x16_to_16x32ma
define <16 x i32> @zext_16x16_to_16x32(<16 x i16> %a ) nounwind readnone {
; ALL-LABEL: zext_16x16_to_16x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; ALL-NEXT: retq
%x = zext <16 x i16> %a to <16 x i32>
@@ -868,7 +868,7 @@ define <16 x i32> @zext_16x16_to_16x32(<
define <2 x i64> @zext_2x16mem_to_2x64(<2 x i16> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_2x16mem_to_2x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
@@ -876,7 +876,7 @@ define <2 x i64> @zext_2x16mem_to_2x64(<
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x16mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxwq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero
@@ -889,7 +889,7 @@ define <2 x i64> @zext_2x16mem_to_2x64(<
define <2 x i64> @sext_2x16mem_to_2x64mask(<2 x i16> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_2x16mem_to_2x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovsxwq (%rdi), %xmm1
@@ -897,7 +897,7 @@ define <2 x i64> @sext_2x16mem_to_2x64ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x16mem_to_2x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxwq (%rdi), %xmm0 {%k1} {z}
@@ -910,7 +910,7 @@ define <2 x i64> @sext_2x16mem_to_2x64ma
define <2 x i64> @sext_2x16mem_to_2x64(<2 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_2x16mem_to_2x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwq (%rdi), %xmm0
; ALL-NEXT: retq
%a = load <2 x i16>,<2 x i16> *%i,align 1
@@ -920,7 +920,7 @@ define <2 x i64> @sext_2x16mem_to_2x64(<
define <4 x i64> @zext_4x16mem_to_4x64(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x16mem_to_4x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -929,7 +929,7 @@ define <4 x i64> @zext_4x16mem_to_4x64(<
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x16mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxwq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
@@ -942,7 +942,7 @@ define <4 x i64> @zext_4x16mem_to_4x64(<
define <4 x i64> @sext_4x16mem_to_4x64mask(<4 x i16> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_4x16mem_to_4x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovsxdq %xmm0, %ymm0
@@ -951,7 +951,7 @@ define <4 x i64> @sext_4x16mem_to_4x64ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x16mem_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxwq (%rdi), %ymm0 {%k1} {z}
@@ -964,7 +964,7 @@ define <4 x i64> @sext_4x16mem_to_4x64ma
define <4 x i64> @sext_4x16mem_to_4x64(<4 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_4x16mem_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwq (%rdi), %ymm0
; ALL-NEXT: retq
%a = load <4 x i16>,<4 x i16> *%i,align 1
@@ -974,7 +974,7 @@ define <4 x i64> @sext_4x16mem_to_4x64(<
define <8 x i64> @zext_8x16mem_to_8x64(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x16mem_to_8x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -982,7 +982,7 @@ define <8 x i64> @zext_8x16mem_to_8x64(<
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -995,7 +995,7 @@ define <8 x i64> @zext_8x16mem_to_8x64(<
define <8 x i64> @sext_8x16mem_to_8x64mask(<8 x i16> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x16mem_to_8x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -1003,7 +1003,7 @@ define <8 x i64> @sext_8x16mem_to_8x64ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x16mem_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxwq (%rdi), %zmm0 {%k1} {z}
@@ -1016,7 +1016,7 @@ define <8 x i64> @sext_8x16mem_to_8x64ma
define <8 x i64> @sext_8x16mem_to_8x64(<8 x i16> *%i) nounwind readnone {
; ALL-LABEL: sext_8x16mem_to_8x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxwq (%rdi), %zmm0
; ALL-NEXT: retq
%a = load <8 x i16>,<8 x i16> *%i,align 1
@@ -1026,7 +1026,7 @@ define <8 x i64> @sext_8x16mem_to_8x64(<
define <8 x i64> @zext_8x16_to_8x64mask(<8 x i16> %a , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x16_to_8x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -1034,7 +1034,7 @@ define <8 x i64> @zext_8x16_to_8x64mask(
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
@@ -1046,7 +1046,7 @@ define <8 x i64> @zext_8x16_to_8x64mask(
define <8 x i64> @zext_8x16_to_8x64(<8 x i16> %a) nounwind readnone {
; ALL-LABEL: zext_8x16_to_8x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; ALL-NEXT: retq
%ret = zext <8 x i16> %a to <8 x i64>
@@ -1055,7 +1055,7 @@ define <8 x i64> @zext_8x16_to_8x64(<8 x
define <2 x i64> @zext_2x32mem_to_2x64(<2 x i32> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_2x32mem_to_2x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
@@ -1063,7 +1063,7 @@ define <2 x i64> @zext_2x32mem_to_2x64(<
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x32mem_to_2x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxdq {{.*#+}} xmm0 {%k1} {z} = mem[0],zero,mem[1],zero
@@ -1076,7 +1076,7 @@ define <2 x i64> @zext_2x32mem_to_2x64(<
define <2 x i64> @sext_2x32mem_to_2x64mask(<2 x i32> *%i , <2 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_2x32mem_to_2x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpsllq $63, %xmm0, %xmm0
; KNL-NEXT: vpsraq $63, %zmm0, %zmm0
; KNL-NEXT: vpmovsxdq (%rdi), %xmm1
@@ -1084,7 +1084,7 @@ define <2 x i64> @sext_2x32mem_to_2x64ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x32mem_to_2x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxdq (%rdi), %xmm0 {%k1} {z}
@@ -1097,7 +1097,7 @@ define <2 x i64> @sext_2x32mem_to_2x64ma
define <2 x i64> @sext_2x32mem_to_2x64(<2 x i32> *%i) nounwind readnone {
; ALL-LABEL: sext_2x32mem_to_2x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxdq (%rdi), %xmm0
; ALL-NEXT: retq
%a = load <2 x i32>,<2 x i32> *%i,align 1
@@ -1107,7 +1107,7 @@ define <2 x i64> @sext_2x32mem_to_2x64(<
define <4 x i64> @zext_4x32mem_to_4x64(<4 x i32> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x32mem_to_4x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1116,7 +1116,7 @@ define <4 x i64> @zext_4x32mem_to_4x64(<
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x32mem_to_4x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
@@ -1129,7 +1129,7 @@ define <4 x i64> @zext_4x32mem_to_4x64(<
define <4 x i64> @sext_4x32mem_to_4x64mask(<4 x i32> *%i , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_4x32mem_to_4x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: vpmovsxdq %xmm0, %ymm0
@@ -1138,7 +1138,7 @@ define <4 x i64> @sext_4x32mem_to_4x64ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x32mem_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovsxdq (%rdi), %ymm0 {%k1} {z}
@@ -1151,7 +1151,7 @@ define <4 x i64> @sext_4x32mem_to_4x64ma
define <4 x i64> @sext_4x32mem_to_4x64(<4 x i32> *%i) nounwind readnone {
; ALL-LABEL: sext_4x32mem_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxdq (%rdi), %ymm0
; ALL-NEXT: retq
%a = load <4 x i32>,<4 x i32> *%i,align 1
@@ -1161,7 +1161,7 @@ define <4 x i64> @sext_4x32mem_to_4x64(<
define <4 x i64> @sext_4x32_to_4x64(<4 x i32> %a) nounwind readnone {
; ALL-LABEL: sext_4x32_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxdq %xmm0, %ymm0
; ALL-NEXT: retq
%x = sext <4 x i32> %a to <4 x i64>
@@ -1170,7 +1170,7 @@ define <4 x i64> @sext_4x32_to_4x64(<4 x
define <4 x i64> @zext_4x32_to_4x64mask(<4 x i32> %a , <4 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_4x32_to_4x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
@@ -1179,7 +1179,7 @@ define <4 x i64> @zext_4x32_to_4x64mask(
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x32_to_4x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vpmovzxdq {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -1191,7 +1191,7 @@ define <4 x i64> @zext_4x32_to_4x64mask(
define <8 x i64> @zext_8x32mem_to_8x64(<8 x i32> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x32mem_to_8x64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -1199,7 +1199,7 @@ define <8 x i64> @zext_8x32mem_to_8x64(<
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x32mem_to_8x64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
@@ -1212,7 +1212,7 @@ define <8 x i64> @zext_8x32mem_to_8x64(<
define <8 x i64> @sext_8x32mem_to_8x64mask(<8 x i32> *%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x32mem_to_8x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
@@ -1220,7 +1220,7 @@ define <8 x i64> @sext_8x32mem_to_8x64ma
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x32mem_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k1
; SKX-NEXT: vpmovsxdq (%rdi), %zmm0 {%k1} {z}
@@ -1233,7 +1233,7 @@ define <8 x i64> @sext_8x32mem_to_8x64ma
define <8 x i64> @sext_8x32mem_to_8x64(<8 x i32> *%i) nounwind readnone {
; ALL-LABEL: sext_8x32mem_to_8x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxdq (%rdi), %zmm0
; ALL-NEXT: retq
%a = load <8 x i32>,<8 x i32> *%i,align 1
@@ -1243,7 +1243,7 @@ define <8 x i64> @sext_8x32mem_to_8x64(<
define <8 x i64> @sext_8x32_to_8x64(<8 x i32> %a) nounwind readnone {
; ALL-LABEL: sext_8x32_to_8x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovsxdq %ymm0, %zmm0
; ALL-NEXT: retq
%x = sext <8 x i32> %a to <8 x i64>
@@ -1252,7 +1252,7 @@ define <8 x i64> @sext_8x32_to_8x64(<8 x
define <8 x i64> @zext_8x32_to_8x64mask(<8 x i32> %a , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x32_to_8x64mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -1260,7 +1260,7 @@ define <8 x i64> @zext_8x32_to_8x64mask(
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x32_to_8x64mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
@@ -1271,7 +1271,7 @@ define <8 x i64> @zext_8x32_to_8x64mask(
}
define <8 x float> @fptrunc_test(<8 x double> %a) nounwind readnone {
; ALL-LABEL: fptrunc_test:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtpd2ps %zmm0, %ymm0
; ALL-NEXT: retq
%b = fptrunc <8 x double> %a to <8 x float>
@@ -1280,7 +1280,7 @@ define <8 x float> @fptrunc_test(<8 x do
define <8 x double> @fpext_test(<8 x float> %a) nounwind readnone {
; ALL-LABEL: fpext_test:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vcvtps2pd %ymm0, %zmm0
; ALL-NEXT: retq
%b = fpext <8 x float> %a to <8 x double>
@@ -1289,13 +1289,13 @@ define <8 x double> @fpext_test(<8 x flo
define <16 x i32> @zext_16i1_to_16xi32(i16 %b) {
; KNL-LABEL: zext_16i1_to_16xi32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16i1_to_16xi32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1306,13 +1306,13 @@ define <16 x i32> @zext_16i1_to_16xi32
define <8 x i64> @zext_8i1_to_8xi64(i8 %b) {
; KNL-LABEL: zext_8i1_to_8xi64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8i1_to_8xi64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1323,7 +1323,7 @@ define <8 x i64> @zext_8i1_to_8xi64(i8
define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
; KNL-LABEL: trunc_16i8_to_16i1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -1332,7 +1332,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8>
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i8_to_16i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -1345,7 +1345,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8>
define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
; KNL-LABEL: trunc_16i32_to_16i1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -1353,7 +1353,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i3
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i32_to_16i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %zmm0, %zmm0
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -1367,14 +1367,14 @@ define i16 @trunc_16i32_to_16i1(<16 x i3
define <4 x i32> @trunc_4i32_to_4i1(<4 x i32> %a, <4 x i32> %b) {
; KNL-LABEL: trunc_4i32_to_4i1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpslld $31, %xmm0, %xmm0
; KNL-NEXT: vpsrad $31, %xmm0, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_4i32_to_4i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm0, %xmm0
; SKX-NEXT: vptestmd %xmm0, %xmm0, %k1
; SKX-NEXT: vpslld $31, %xmm1, %xmm0
@@ -1391,7 +1391,7 @@ define <4 x i32> @trunc_4i32_to_4i1(<4 x
define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
; KNL-LABEL: trunc_8i16_to_8i1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -1400,7 +1400,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_8i16_to_8i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
@@ -1413,14 +1413,14 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %
define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; KNL-LABEL: sext_8i1_8i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; KNL-NEXT: vpxor %ymm1, %ymm0, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpled %ymm0, %ymm1, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: retq
@@ -1433,7 +1433,7 @@ define <8 x i32> @sext_8i1_8i32(<8 x i32
define i16 @trunc_i32_to_i1(i32 %a) {
; KNL-LABEL: trunc_i32_to_i1:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: movw $-4, %ax
; KNL-NEXT: kmovw %eax, %k0
; KNL-NEXT: kshiftrw $1, %k0, %k0
@@ -1446,7 +1446,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_i32_to_i1:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: movw $-4, %ax
; SKX-NEXT: kmovd %eax, %k0
; SKX-NEXT: kshiftrw $1, %k0, %k0
@@ -1465,14 +1465,14 @@ define i16 @trunc_i32_to_i1(i32 %a) {
define <8 x i16> @sext_8i1_8i16(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; KNL-LABEL: sext_8i1_8i16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %ymm0, %ymm1, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vzeroupper
@@ -1484,13 +1484,13 @@ define <8 x i16> @sext_8i1_8i16(<8 x i32
define <16 x i32> @sext_16i1_16i32(<16 x i32> %a1, <16 x i32> %a2) nounwind {
; KNL-LABEL: sext_16i1_16i32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16i1_16i32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %zmm0, %zmm1, %k0
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: retq
@@ -1501,13 +1501,13 @@ define <16 x i32> @sext_16i1_16i32(<16 x
define <8 x i64> @sext_8i1_8i64(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; KNL-LABEL: sext_8i1_8i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpmovsxdq %ymm0, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpgtd %ymm0, %ymm1, %k0
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: retq
@@ -1518,13 +1518,13 @@ define <8 x i64> @sext_8i1_8i64(<8 x i32
define void @extload_v8i64(<8 x i8>* %a, <8 x i64>* %res) {
; KNL-LABEL: extload_v8i64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovsxbq (%rdi), %zmm0
; KNL-NEXT: vmovdqa64 %zmm0, (%rsi)
; KNL-NEXT: retq
;
; SKX-LABEL: extload_v8i64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpmovsxbq (%rdi), %zmm0
; SKX-NEXT: vmovdqa64 %zmm0, (%rsi)
; SKX-NEXT: vzeroupper
@@ -1537,7 +1537,7 @@ define void @extload_v8i64(<8 x i8>* %a,
define <64 x i16> @test21(<64 x i16> %x , <64 x i1> %mask) nounwind readnone {
; KNL-LABEL: test21:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero,xmm7[8],zero,xmm7[9],zero,xmm7[10],zero,xmm7[11],zero,xmm7[12],zero,xmm7[13],zero,xmm7[14],zero,xmm7[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
@@ -1557,7 +1557,7 @@ define <64 x i16> @test21(<64 x i16> %x
; KNL-NEXT: retq
;
; SKX-LABEL: test21:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %zmm2, %zmm2
; SKX-NEXT: vpmovb2m %zmm2, %k1
; SKX-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
@@ -1570,7 +1570,7 @@ define <64 x i16> @test21(<64 x i16> %x
define <16 x i16> @shuffle_zext_16x8_to_16x16(<16 x i8> %a) nounwind readnone {
; ALL-LABEL: shuffle_zext_16x8_to_16x16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; ALL-NEXT: retq
%1 = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <32 x i32> <i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7, i32 16, i32 8, i32 16, i32 9, i32 16, i32 10, i32 16, i32 11, i32 16, i32 12, i32 16, i32 13, i32 16, i32 14, i32 16, i32 15, i32 16>
@@ -1580,7 +1580,7 @@ define <16 x i16> @shuffle_zext_16x8_to_
define <16 x i16> @shuffle_zext_16x8_to_16x16_mask(<16 x i8> %a, <16 x i1> %mask) nounwind readnone {
; KNL-LABEL: shuffle_zext_16x8_to_16x16_mask:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; KNL-NEXT: vpsllw $15, %ymm1, %ymm1
@@ -1589,7 +1589,7 @@ define <16 x i16> @shuffle_zext_16x8_to_
; KNL-NEXT: retq
;
; SKX-LABEL: shuffle_zext_16x8_to_16x16_mask:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm1, %xmm1
; SKX-NEXT: vpmovb2m %xmm1, %k1
; SKX-NEXT: vpmovzxbw {{.*#+}} ymm0 {%k1} {z} = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
@@ -1602,7 +1602,7 @@ define <16 x i16> @shuffle_zext_16x8_to_
define <16 x i16> @zext_32x8_to_16x16(<32 x i8> %a) {
; ALL-LABEL: zext_32x8_to_16x16:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; ALL-NEXT: retq
%1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 1, i32 32, i32 2, i32 32, i32 3, i32 32, i32 4, i32 32, i32 5, i32 32, i32 6, i32 32, i32 7, i32 32, i32 8, i32 32, i32 9, i32 32, i32 10, i32 32, i32 11, i32 32, i32 12, i32 32, i32 13, i32 32, i32 14, i32 32, i32 15, i32 32>
@@ -1612,7 +1612,7 @@ define <16 x i16> @zext_32x8_to_16x16(<3
define <8 x i32> @zext_32x8_to_8x32(<32 x i8> %a) {
; ALL-LABEL: zext_32x8_to_8x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; ALL-NEXT: retq
%1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 1, i32 32, i32 32, i32 32, i32 2, i32 32, i32 32, i32 32, i32 3, i32 32, i32 32, i32 32, i32 4, i32 32, i32 32, i32 32, i32 5, i32 32, i32 32, i32 32, i32 6, i32 32, i32 32, i32 32, i32 7, i32 32, i32 32, i32 32>
@@ -1622,7 +1622,7 @@ define <8 x i32> @zext_32x8_to_8x32(<32
define <4 x i64> @zext_32x8_to_4x64(<32 x i8> %a) {
; ALL-LABEL: zext_32x8_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; ALL-NEXT: retq
%1 = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 1, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 2, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 3, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
@@ -1632,7 +1632,7 @@ define <4 x i64> @zext_32x8_to_4x64(<32
define <8 x i32> @zext_16x16_to_8x32(<16 x i16> %a) {
; ALL-LABEL: zext_16x16_to_8x32:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; ALL-NEXT: retq
%1 = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 1, i32 16, i32 2, i32 16, i32 3, i32 16, i32 4, i32 16, i32 5, i32 16, i32 6, i32 16, i32 7, i32 16>
@@ -1642,7 +1642,7 @@ define <8 x i32> @zext_16x16_to_8x32(<16
define <4 x i64> @zext_16x16_to_4x64(<16 x i16> %a) {
; ALL-LABEL: zext_16x16_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; ALL-NEXT: retq
%1 = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 16, i32 16, i32 1, i32 16, i32 16, i32 16, i32 2, i32 16, i32 16, i32 16, i32 3, i32 16, i32 16, i32 16>
@@ -1652,7 +1652,7 @@ define <4 x i64> @zext_16x16_to_4x64(<16
define <4 x i64> @zext_8x32_to_4x64(<8 x i32> %a) {
; ALL-LABEL: zext_8x32_to_4x64:
-; ALL: # BB#0:
+; ALL: # %bb.0:
; ALL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; ALL-NEXT: retq
%1 = shufflevector <8 x i32> %a, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 8, i32 2, i32 8, i32 3, i32 8>
@@ -1662,7 +1662,7 @@ define <4 x i64> @zext_8x32_to_4x64(<8 x
define <64 x i8> @zext_64xi1_to_64xi8(<64 x i8> %x, <64 x i8> %y) #0 {
; KNL-LABEL: zext_64xi1_to_64xi8:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; KNL-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -1671,7 +1671,7 @@ define <64 x i8> @zext_64xi1_to_64xi8(<6
; KNL-NEXT: retq
;
; SKX-LABEL: zext_64xi1_to_64xi8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
; SKX-NEXT: vmovdqu8 {{.*}}(%rip), %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1682,7 +1682,7 @@ define <64 x i8> @zext_64xi1_to_64xi8(<6
define <32 x i16> @zext_32xi1_to_32xi16(<32 x i16> %x, <32 x i16> %y) #0 {
; KNL-LABEL: zext_32xi1_to_32xi16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
; KNL-NEXT: vpsrlw $15, %ymm0, %ymm0
; KNL-NEXT: vpcmpeqw %ymm3, %ymm1, %ymm1
@@ -1690,7 +1690,7 @@ define <32 x i16> @zext_32xi1_to_32xi16(
; KNL-NEXT: retq
;
; SKX-LABEL: zext_32xi1_to_32xi16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; SKX-NEXT: vmovdqu16 {{.*}}(%rip), %zmm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1701,13 +1701,13 @@ define <32 x i16> @zext_32xi1_to_32xi16(
define <16 x i16> @zext_16xi1_to_16xi16(<16 x i16> %x, <16 x i16> %y) #0 {
; KNL-LABEL: zext_16xi1_to_16xi16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpsrlw $15, %ymm0, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_16xi1_to_16xi16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; SKX-NEXT: vmovdqu16 {{.*}}(%rip), %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1719,7 +1719,7 @@ define <16 x i16> @zext_16xi1_to_16xi16(
define <32 x i8> @zext_32xi1_to_32xi8(<32 x i16> %x, <32 x i16> %y) #0 {
; KNL-LABEL: zext_32xi1_to_32xi8:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpmovdb %zmm0, %xmm0
@@ -1731,7 +1731,7 @@ define <32 x i8> @zext_32xi1_to_32xi8(<3
; KNL-NEXT: retq
;
; SKX-LABEL: zext_32xi1_to_32xi8:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; SKX-NEXT: vmovdqu8 {{.*}}(%rip), %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -1742,7 +1742,7 @@ define <32 x i8> @zext_32xi1_to_32xi8(<3
define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
; KNL-LABEL: zext_4xi1_to_4x32:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; KNL-NEXT: vpand %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -1751,7 +1751,7 @@ define <4 x i32> @zext_4xi1_to_4x32(<4 x
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4xi1_to_4x32:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SKX-NEXT: vpand %xmm2, %xmm1, %xmm1
; SKX-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -1765,7 +1765,7 @@ define <4 x i32> @zext_4xi1_to_4x32(<4 x
define <2 x i64> @zext_2xi1_to_2xi64(<2 x i8> %x, <2 x i8> %y) #0 {
; KNL-LABEL: zext_2xi1_to_2xi64:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; KNL-NEXT: vpand %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -1774,7 +1774,7 @@ define <2 x i64> @zext_2xi1_to_2xi64(<2
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2xi1_to_2xi64:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SKX-NEXT: vpand %xmm2, %xmm1, %xmm1
; SKX-NEXT: vpand %xmm2, %xmm0, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/avx512-extract-subvector-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-extract-subvector-load-store.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-extract-subvector-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-extract-subvector-load-store.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define void @load_v8i1_broadcast_4_v2i1(<8 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v8i1_broadcast_4_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $4, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -15,7 +15,7 @@ define void @load_v8i1_broadcast_4_v2i1(
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $4, %k0, %k1
@@ -35,7 +35,7 @@ define void @load_v8i1_broadcast_4_v2i1(
}
define void @load_v8i1_broadcast_7_v2i1(<8 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v8i1_broadcast_7_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $6, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -46,7 +46,7 @@ define void @load_v8i1_broadcast_7_v2i1(
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $6, %k0, %k1
@@ -66,7 +66,7 @@ define void @load_v8i1_broadcast_7_v2i1(
}
define void @load_v16i1_broadcast_8_v2i1(<16 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v16i1_broadcast_8_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $8, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -77,7 +77,7 @@ define void @load_v16i1_broadcast_8_v2i1
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $8, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -96,7 +96,7 @@ define void @load_v16i1_broadcast_8_v2i1
}
define void @load_v16i1_broadcast_8_v4i1(<16 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v16i1_broadcast_8_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $8, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -107,7 +107,7 @@ define void @load_v16i1_broadcast_8_v4i1
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $8, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -126,7 +126,7 @@ define void @load_v16i1_broadcast_8_v4i1
}
define void @load_v16i1_broadcast_15_v2i1(<16 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v16i1_broadcast_15_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $14, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -137,7 +137,7 @@ define void @load_v16i1_broadcast_15_v2i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $14, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -156,7 +156,7 @@ define void @load_v16i1_broadcast_15_v2i
}
define void @load_v16i1_broadcast_15_v4i1(<16 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v16i1_broadcast_15_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $12, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -167,7 +167,7 @@ define void @load_v16i1_broadcast_15_v4i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $12, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -186,7 +186,7 @@ define void @load_v16i1_broadcast_15_v4i
}
define void @load_v32i1_broadcast_16_v2i1(<32 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_16_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -197,7 +197,7 @@ define void @load_v32i1_broadcast_16_v2i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -216,7 +216,7 @@ define void @load_v32i1_broadcast_16_v2i
}
define void @load_v32i1_broadcast_16_v4i1(<32 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_16_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -227,7 +227,7 @@ define void @load_v32i1_broadcast_16_v4i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -246,7 +246,7 @@ define void @load_v32i1_broadcast_16_v4i
}
define void @load_v32i1_broadcast_16_v8i1(<32 x i1>* %a0,<8 x float> %a1,<8 x float> %a2,<8 x float>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_16_v8i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm2
@@ -258,7 +258,7 @@ define void @load_v32i1_broadcast_16_v8i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v8i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -277,7 +277,7 @@ define void @load_v32i1_broadcast_16_v8i
}
define void @load_v32i1_broadcast_31_v2i1(<32 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_31_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $30, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -288,7 +288,7 @@ define void @load_v32i1_broadcast_31_v2i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $30, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -307,7 +307,7 @@ define void @load_v32i1_broadcast_31_v2i
}
define void @load_v32i1_broadcast_31_v4i1(<32 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_31_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $28, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -318,7 +318,7 @@ define void @load_v32i1_broadcast_31_v4i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $28, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -337,7 +337,7 @@ define void @load_v32i1_broadcast_31_v4i
}
define void @load_v32i1_broadcast_31_v8i1(<32 x i1>* %a0,<8 x float> %a1,<8 x float> %a2,<8 x float>* %a3) {
; AVX512-LABEL: load_v32i1_broadcast_31_v8i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $24, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm2
@@ -350,7 +350,7 @@ define void @load_v32i1_broadcast_31_v8i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v8i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $24, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -370,7 +370,7 @@ define void @load_v32i1_broadcast_31_v8i
}
define void @load_v64i1_broadcast_32_v2i1(<64 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_32_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -381,7 +381,7 @@ define void @load_v64i1_broadcast_32_v2i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -400,7 +400,7 @@ define void @load_v64i1_broadcast_32_v2i
}
define void @load_v64i1_broadcast_32_v4i1(<64 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_32_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -411,7 +411,7 @@ define void @load_v64i1_broadcast_32_v4i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -430,7 +430,7 @@ define void @load_v64i1_broadcast_32_v4i
}
define void @load_v64i1_broadcast_32_v8i1(<64 x i1>* %a0,<8 x float> %a1,<8 x float> %a2,<8 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_32_v8i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm2
@@ -442,7 +442,7 @@ define void @load_v64i1_broadcast_32_v8i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v8i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -461,7 +461,7 @@ define void @load_v64i1_broadcast_32_v8i
}
define void @load_v64i1_broadcast_32_v16i1(<64 x i1>* %a0,<16 x float> %a1,<16 x float> %a2,<16 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_32_v16i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %zmm2
@@ -473,7 +473,7 @@ define void @load_v64i1_broadcast_32_v16
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v16i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -492,7 +492,7 @@ define void @load_v64i1_broadcast_32_v16
}
define void @load_v64i1_broadcast_63_v2i1(<64 x i1>* %a0,<2 x double> %a1,<2 x double> %a2,<2 x double>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_63_v2i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $62, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm2
@@ -503,7 +503,7 @@ define void @load_v64i1_broadcast_63_v2i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v2i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $62, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -522,7 +522,7 @@ define void @load_v64i1_broadcast_63_v2i
}
define void @load_v64i1_broadcast_63_v4i1(<64 x i1>* %a0,<4 x float> %a1,<4 x float> %a2,<4 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_63_v4i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $60, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm2
@@ -533,7 +533,7 @@ define void @load_v64i1_broadcast_63_v4i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v4i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $60, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -552,7 +552,7 @@ define void @load_v64i1_broadcast_63_v4i
}
define void @load_v64i1_broadcast_63_v8i1(<64 x i1>* %a0,<8 x float> %a1,<8 x float> %a2,<8 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_63_v8i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $56, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm2
@@ -565,7 +565,7 @@ define void @load_v64i1_broadcast_63_v8i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v8i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $56, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -585,7 +585,7 @@ define void @load_v64i1_broadcast_63_v8i
}
define void @load_v64i1_broadcast_63_v16i1(<64 x i1>* %a0,<16 x float> %a1,<16 x float> %a2,<16 x float>* %a3) {
; AVX512-LABEL: load_v64i1_broadcast_63_v16i1:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $48, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %zmm2
@@ -598,7 +598,7 @@ define void @load_v64i1_broadcast_63_v16
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v16i1:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $48, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
@@ -618,14 +618,14 @@ define void @load_v64i1_broadcast_63_v16
}
define void @load_v2i1_broadcast_1_v1i1_store(<2 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v2i1_broadcast_1_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $1, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v2i1_broadcast_1_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $1, %k0, %k0
@@ -639,14 +639,14 @@ define void @load_v2i1_broadcast_1_v1i1_
}
define void @load_v3i1_broadcast_1_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v3i1_broadcast_1_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $1, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v3i1_broadcast_1_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $1, %k0, %k0
@@ -660,14 +660,14 @@ define void @load_v3i1_broadcast_1_v1i1_
}
define void @load_v3i1_broadcast_2_v1i1_store(<3 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v3i1_broadcast_2_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $2, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v3i1_broadcast_2_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $2, %k0, %k0
@@ -681,14 +681,14 @@ define void @load_v3i1_broadcast_2_v1i1_
}
define void @load_v4i1_broadcast_2_v1i1_store(<4 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v4i1_broadcast_2_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $2, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v4i1_broadcast_2_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $2, %k0, %k0
@@ -702,14 +702,14 @@ define void @load_v4i1_broadcast_2_v1i1_
}
define void @load_v4i1_broadcast_3_v1i1_store(<4 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v4i1_broadcast_3_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $3, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v4i1_broadcast_3_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $3, %k0, %k0
@@ -723,14 +723,14 @@ define void @load_v4i1_broadcast_3_v1i1_
}
define void @load_v8i1_broadcast_4_v1i1_store(<8 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v8i1_broadcast_4_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $4, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $4, %k0, %k0
@@ -744,7 +744,7 @@ define void @load_v8i1_broadcast_4_v1i1_
}
define void @load_v8i1_broadcast_4_v2i1_store(<8 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v8i1_broadcast_4_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $4, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -754,7 +754,7 @@ define void @load_v8i1_broadcast_4_v2i1_
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $4, %k0, %k1
@@ -773,14 +773,14 @@ define void @load_v8i1_broadcast_4_v2i1_
}
define void @load_v8i1_broadcast_7_v1i1_store(<8 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v8i1_broadcast_7_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $7, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $7, %k0, %k0
@@ -794,7 +794,7 @@ define void @load_v8i1_broadcast_7_v1i1_
}
define void @load_v8i1_broadcast_7_v2i1_store(<8 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v8i1_broadcast_7_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovb (%rdi), %k0
; AVX512-NEXT: kshiftrw $6, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -804,7 +804,7 @@ define void @load_v8i1_broadcast_7_v2i1_
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax
; AVX512NOTDQ-NEXT: kmovd %eax, %k0
; AVX512NOTDQ-NEXT: kshiftrw $6, %k0, %k1
@@ -823,14 +823,14 @@ define void @load_v8i1_broadcast_7_v2i1_
}
define void @load_v16i1_broadcast_8_v1i1_store(<16 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_8_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $8, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $8, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -843,7 +843,7 @@ define void @load_v16i1_broadcast_8_v1i1
}
define void @load_v16i1_broadcast_8_v2i1_store(<16 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_8_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $8, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -853,7 +853,7 @@ define void @load_v16i1_broadcast_8_v2i1
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $8, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -871,7 +871,7 @@ define void @load_v16i1_broadcast_8_v2i1
}
define void @load_v16i1_broadcast_8_v4i1_store(<16 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_8_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $8, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -881,7 +881,7 @@ define void @load_v16i1_broadcast_8_v4i1
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_8_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $8, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -899,14 +899,14 @@ define void @load_v16i1_broadcast_8_v4i1
}
define void @load_v16i1_broadcast_15_v1i1_store(<16 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_15_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $15, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $15, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -919,7 +919,7 @@ define void @load_v16i1_broadcast_15_v1i
}
define void @load_v16i1_broadcast_15_v2i1_store(<16 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_15_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $14, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -929,7 +929,7 @@ define void @load_v16i1_broadcast_15_v2i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $14, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -947,7 +947,7 @@ define void @load_v16i1_broadcast_15_v2i
}
define void @load_v16i1_broadcast_15_v4i1_store(<16 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v16i1_broadcast_15_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovw (%rdi), %k0
; AVX512-NEXT: kshiftrw $12, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -957,7 +957,7 @@ define void @load_v16i1_broadcast_15_v4i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v16i1_broadcast_15_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrw $12, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -975,14 +975,14 @@ define void @load_v16i1_broadcast_15_v4i
}
define void @load_v32i1_broadcast_16_v1i1_store(<32 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_16_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -995,7 +995,7 @@ define void @load_v32i1_broadcast_16_v1i
}
define void @load_v32i1_broadcast_16_v2i1_store(<32 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_16_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -1005,7 +1005,7 @@ define void @load_v32i1_broadcast_16_v2i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1023,7 +1023,7 @@ define void @load_v32i1_broadcast_16_v2i
}
define void @load_v32i1_broadcast_16_v4i1_store(<32 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_16_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -1033,7 +1033,7 @@ define void @load_v32i1_broadcast_16_v4i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1051,7 +1051,7 @@ define void @load_v32i1_broadcast_16_v4i
}
define void @load_v32i1_broadcast_16_v8i1_store(<32 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_16_v8i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $16, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm0
@@ -1062,7 +1062,7 @@ define void @load_v32i1_broadcast_16_v8i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_16_v8i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $16, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1080,14 +1080,14 @@ define void @load_v32i1_broadcast_16_v8i
}
define void @load_v32i1_broadcast_31_v1i1_store(<32 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_31_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $31, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $31, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -1100,7 +1100,7 @@ define void @load_v32i1_broadcast_31_v1i
}
define void @load_v32i1_broadcast_31_v2i1_store(<32 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_31_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $30, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -1110,7 +1110,7 @@ define void @load_v32i1_broadcast_31_v2i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $30, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1128,7 +1128,7 @@ define void @load_v32i1_broadcast_31_v2i
}
define void @load_v32i1_broadcast_31_v4i1_store(<32 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_31_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $28, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -1138,7 +1138,7 @@ define void @load_v32i1_broadcast_31_v4i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $28, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1156,7 +1156,7 @@ define void @load_v32i1_broadcast_31_v4i
}
define void @load_v32i1_broadcast_31_v8i1_store(<32 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-LABEL: load_v32i1_broadcast_31_v8i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovd (%rdi), %k0
; AVX512-NEXT: kshiftrd $24, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm0
@@ -1168,7 +1168,7 @@ define void @load_v32i1_broadcast_31_v8i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v32i1_broadcast_31_v8i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovd (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrd $24, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1187,14 +1187,14 @@ define void @load_v32i1_broadcast_31_v8i
}
define void @load_v64i1_broadcast_32_v1i1_store(<64 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_32_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -1207,7 +1207,7 @@ define void @load_v64i1_broadcast_32_v1i
}
define void @load_v64i1_broadcast_32_v2i1_store(<64 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_32_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -1217,7 +1217,7 @@ define void @load_v64i1_broadcast_32_v2i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1235,7 +1235,7 @@ define void @load_v64i1_broadcast_32_v2i
}
define void @load_v64i1_broadcast_32_v4i1_store(<64 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_32_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -1245,7 +1245,7 @@ define void @load_v64i1_broadcast_32_v4i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1263,7 +1263,7 @@ define void @load_v64i1_broadcast_32_v4i
}
define void @load_v64i1_broadcast_32_v8i1_store(<64 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_32_v8i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm0
@@ -1274,7 +1274,7 @@ define void @load_v64i1_broadcast_32_v8i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v8i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1292,7 +1292,7 @@ define void @load_v64i1_broadcast_32_v8i
}
define void @load_v64i1_broadcast_32_v16i1_store(<64 x i1>* %a0,<16 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_32_v16i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $32, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %zmm0
@@ -1303,7 +1303,7 @@ define void @load_v64i1_broadcast_32_v16
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_32_v16i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $32, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1320,14 +1320,14 @@ define void @load_v64i1_broadcast_32_v16
}
define void @load_v64i1_broadcast_63_v1i1_store(<64 x i1>* %a0,<1 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_63_v1i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $63, %k0, %k0
; AVX512-NEXT: kmovb %k0, (%rsi)
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v1i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $63, %k0, %k0
; AVX512NOTDQ-NEXT: kmovd %k0, %eax
@@ -1340,7 +1340,7 @@ define void @load_v64i1_broadcast_63_v1i
}
define void @load_v64i1_broadcast_63_v2i1_store(<64 x i1>* %a0,<2 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_63_v2i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $62, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %xmm0
@@ -1350,7 +1350,7 @@ define void @load_v64i1_broadcast_63_v2i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v2i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $62, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1368,7 +1368,7 @@ define void @load_v64i1_broadcast_63_v2i
}
define void @load_v64i1_broadcast_63_v4i1_store(<64 x i1>* %a0,<4 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_63_v4i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $60, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %xmm0
@@ -1378,7 +1378,7 @@ define void @load_v64i1_broadcast_63_v4i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v4i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $60, %k0, %k1
; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
@@ -1396,7 +1396,7 @@ define void @load_v64i1_broadcast_63_v4i
}
define void @load_v64i1_broadcast_63_v8i1_store(<64 x i1>* %a0,<8 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_63_v8i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $56, %k0, %k0
; AVX512-NEXT: vpmovm2q %k0, %zmm0
@@ -1408,7 +1408,7 @@ define void @load_v64i1_broadcast_63_v8i
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v8i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $56, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1427,7 +1427,7 @@ define void @load_v64i1_broadcast_63_v8i
}
define void @load_v64i1_broadcast_63_v16i1_store(<64 x i1>* %a0,<16 x i1>* %a1) {
; AVX512-LABEL: load_v64i1_broadcast_63_v16i1_store:
-; AVX512: # BB#0:
+; AVX512: # %bb.0:
; AVX512-NEXT: kmovq (%rdi), %k0
; AVX512-NEXT: kshiftrq $48, %k0, %k0
; AVX512-NEXT: vpmovm2d %k0, %zmm0
@@ -1439,7 +1439,7 @@ define void @load_v64i1_broadcast_63_v16
; AVX512-NEXT: retq
;
; AVX512NOTDQ-LABEL: load_v64i1_broadcast_63_v16i1_store:
-; AVX512NOTDQ: # BB#0:
+; AVX512NOTDQ: # %bb.0:
; AVX512NOTDQ-NEXT: kmovq (%rdi), %k0
; AVX512NOTDQ-NEXT: kshiftrq $48, %k0, %k1
; AVX512NOTDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
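
The hunks above follow one mechanical pattern: every FileCheck expectation for the entry basic block changes from "# BB#0:" to "# %bb.0:", tracking the unified MBB reference format the AsmPrinter now prints. A minimal sketch of the updated pattern follows; the function, RUN line, and instruction checks are illustrative assumptions, not part of this commit:

; Hypothetical test sketch (not from this commit): the entry-block comment
; emitted into the assembly is now matched as "%bb.0" rather than "BB#0".
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
define i32 @sum(i32 %a, i32 %b) {
; CHECK-LABEL: sum:
; CHECK:       # %bb.0:
; CHECK-NEXT:    leal (%rdi,%rsi), %eax
; CHECK-NEXT:    retq
  %r = add i32 %a, %b
  ret i32 %r
}
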
Modified: llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define <8 x i16> @extract_subvector128_v32i16(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector128_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -14,7 +14,7 @@ define <8 x i16> @extract_subvector128_v
define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector128_v32i16_first_element:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -24,7 +24,7 @@ define <8 x i16> @extract_subvector128_v
define <16 x i8> @extract_subvector128_v64i8(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector128_v64i8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -34,7 +34,7 @@ define <16 x i8> @extract_subvector128_v
define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector128_v64i8_first_element:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -45,7 +45,7 @@ define <16 x i8> @extract_subvector128_v
define <16 x i16> @extract_subvector256_v32i16(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector256_v32i16:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; SKX-NEXT: retq
%r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -54,7 +54,7 @@ define <16 x i16> @extract_subvector256_
define <32 x i8> @extract_subvector256_v64i8(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector256_v64i8:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; SKX-NEXT: retq
%r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
@@ -63,7 +63,7 @@ define <32 x i8> @extract_subvector256_v
define void @extract_subvector256_v8f64_store(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v8f64_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -76,7 +76,7 @@ entry:
define void @extract_subvector256_v8f32_store(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v8f32_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -89,7 +89,7 @@ entry:
define void @extract_subvector256_v4i64_store(i64* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4i64_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -102,7 +102,7 @@ entry:
define void @extract_subvector256_v8i32_store(i32* nocapture %addr, <8 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v8i32_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -115,7 +115,7 @@ entry:
define void @extract_subvector256_v16i16_store(i16* nocapture %addr, <16 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v16i16_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -128,7 +128,7 @@ entry:
define void @extract_subvector256_v32i8_store(i8* nocapture %addr, <32 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v32i8_store:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vextractf128 $1, %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -141,7 +141,7 @@ entry:
define void @extract_subvector256_v4f64_store_lo(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4f64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -154,7 +154,7 @@ entry:
define void @extract_subvector256_v4f64_store_lo_align_16(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4f64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -167,7 +167,7 @@ entry:
define void @extract_subvector256_v4f32_store_lo(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4f32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -180,7 +180,7 @@ entry:
define void @extract_subvector256_v4f32_store_lo_align_16(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4f32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -193,7 +193,7 @@ entry:
define void @extract_subvector256_v2i64_store_lo(i64* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v2i64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -206,7 +206,7 @@ entry:
define void @extract_subvector256_v2i64_store_lo_align_16(i64* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v2i64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -219,7 +219,7 @@ entry:
define void @extract_subvector256_v4i32_store_lo(i32* nocapture %addr, <8 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4i32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -232,7 +232,7 @@ entry:
define void @extract_subvector256_v4i32_store_lo_align_16(i32* nocapture %addr, <8 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v4i32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -245,7 +245,7 @@ entry:
define void @extract_subvector256_v8i16_store_lo(i16* nocapture %addr, <16 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v8i16_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -258,7 +258,7 @@ entry:
define void @extract_subvector256_v8i16_store_lo_align_16(i16* nocapture %addr, <16 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v8i16_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -271,7 +271,7 @@ entry:
define void @extract_subvector256_v16i8_store_lo(i8* nocapture %addr, <32 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v16i8_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -284,7 +284,7 @@ entry:
define void @extract_subvector256_v16i8_store_lo_align_16(i8* nocapture %addr, <32 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector256_v16i8_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -297,7 +297,7 @@ entry:
define void @extract_subvector512_v2f64_store_lo(double* nocapture %addr, <8 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v2f64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -310,7 +310,7 @@ entry:
define void @extract_subvector512_v2f64_store_lo_align_16(double* nocapture %addr, <8 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v2f64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -323,7 +323,7 @@ entry:
define void @extract_subvector512_v4f32_store_lo(float* nocapture %addr, <16 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4f32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -336,7 +336,7 @@ entry:
define void @extract_subvector512_v4f32_store_lo_align_16(float* nocapture %addr, <16 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4f32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -349,7 +349,7 @@ entry:
define void @extract_subvector512_v2i64_store_lo(i64* nocapture %addr, <8 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v2i64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -362,7 +362,7 @@ entry:
define void @extract_subvector512_v2i64_store_lo_align_16(i64* nocapture %addr, <8 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v2i64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -375,7 +375,7 @@ entry:
define void @extract_subvector512_v4i32_store_lo(i32* nocapture %addr, <16 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4i32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -388,7 +388,7 @@ entry:
define void @extract_subvector512_v4i32_store_lo_align_16(i32* nocapture %addr, <16 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4i32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -401,7 +401,7 @@ entry:
define void @extract_subvector512_v8i16_store_lo(i16* nocapture %addr, <32 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8i16_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -414,7 +414,7 @@ entry:
define void @extract_subvector512_v16i8_store_lo(i8* nocapture %addr, <64 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v16i8_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -427,7 +427,7 @@ entry:
define void @extract_subvector512_v16i8_store_lo_align_16(i8* nocapture %addr, <64 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v16i8_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %xmm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -440,7 +440,7 @@ entry:
define void @extract_subvector512_v4f64_store_lo(double* nocapture %addr, <8 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4f64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -453,7 +453,7 @@ entry:
define void @extract_subvector512_v4f64_store_lo_align_16(double* nocapture %addr, <8 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4f64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -466,7 +466,7 @@ entry:
define void @extract_subvector512_v4f64_store_lo_align_32(double* nocapture %addr, <8 x double> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4f64_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -479,7 +479,7 @@ entry:
define void @extract_subvector512_v8f32_store_lo(float* nocapture %addr, <16 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8f32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -492,7 +492,7 @@ entry:
define void @extract_subvector512_v8f32_store_lo_align_16(float* nocapture %addr, <16 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8f32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -505,7 +505,7 @@ entry:
define void @extract_subvector512_v8f32_store_lo_align_32(float* nocapture %addr, <16 x float> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8f32_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -518,7 +518,7 @@ entry:
define void @extract_subvector512_v4i64_store_lo(i64* nocapture %addr, <8 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4i64_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -531,7 +531,7 @@ entry:
define void @extract_subvector512_v4i64_store_lo_align_16(i64* nocapture %addr, <8 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4i64_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -544,7 +544,7 @@ entry:
define void @extract_subvector512_v4i64_store_lo_align_32(i64* nocapture %addr, <8 x i64> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v4i64_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -557,7 +557,7 @@ entry:
define void @extract_subvector512_v8i32_store_lo(i32* nocapture %addr, <16 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8i32_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -570,7 +570,7 @@ entry:
define void @extract_subvector512_v8i32_store_lo_align_16(i32* nocapture %addr, <16 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8i32_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -583,7 +583,7 @@ entry:
define void @extract_subvector512_v8i32_store_lo_align_32(i32* nocapture %addr, <16 x i32> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v8i32_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -596,7 +596,7 @@ entry:
define void @extract_subvector512_v16i16_store_lo(i16* nocapture %addr, <32 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v16i16_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -609,7 +609,7 @@ entry:
define void @extract_subvector512_v16i16_store_lo_align_16(i16* nocapture %addr, <32 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v16i16_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -622,7 +622,7 @@ entry:
define void @extract_subvector512_v16i16_store_lo_align_32(i16* nocapture %addr, <32 x i16> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v16i16_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -635,7 +635,7 @@ entry:
define void @extract_subvector512_v32i8_store_lo(i8* nocapture %addr, <64 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v32i8_store_lo:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -648,7 +648,7 @@ entry:
define void @extract_subvector512_v32i8_store_lo_align_16(i8* nocapture %addr, <64 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v32i8_store_lo_align_16:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovups %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -661,7 +661,7 @@ entry:
define void @extract_subvector512_v32i8_store_lo_align_32(i8* nocapture %addr, <64 x i8> %a) nounwind uwtable ssp {
; SKX-LABEL: extract_subvector512_v32i8_store_lo_align_32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: vmovaps %ymm0, (%rdi)
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -674,7 +674,7 @@ entry:
define <4 x double> @test_mm512_mask_extractf64x4_pd(<4 x double> %__W, i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_mask_extractf64x4_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x4 $1, %zmm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -688,7 +688,7 @@ entry:
define <4 x double> @test_mm512_maskz_extractf64x4_pd(i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf64x4_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x4 $1, %zmm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -702,7 +702,7 @@ entry:
define <4 x float> @test_mm512_mask_extractf32x4_ps(<4 x float> %__W, i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_mask_extractf32x4_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %zmm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -718,7 +718,7 @@ entry:
define <4 x float> @test_mm512_maskz_extractf32x4_ps(i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf32x4_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %zmm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
@@ -734,7 +734,7 @@ entry:
define <2 x double> @test_mm256_mask_extractf64x2_pd(<2 x double> %__W, i8 %__U, <4 x double> %__A) {
; SKX-LABEL: test_mm256_mask_extractf64x2_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $1, %ymm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -749,7 +749,7 @@ entry:
define <2 x double> @test_mm256_maskz_extractf64x2_pd(i8 %__U, <4 x double> %__A) {
; SKX-LABEL: test_mm256_maskz_extractf64x2_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
@@ -764,7 +764,7 @@ entry:
define <2 x i64> @test_mm256_mask_extracti64x2_epi64(<2 x i64> %__W, i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_mask_extracti64x2_epi64:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti64x2 $1, %ymm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -779,7 +779,7 @@ entry:
define <2 x i64> @test_mm256_maskz_extracti64x2_epi64(i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_maskz_extracti64x2_epi64:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti64x2 $1, %ymm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
@@ -794,7 +794,7 @@ entry:
define <4 x float> @test_mm256_mask_extractf32x4_ps(<4 x float> %__W, i8 %__U, <8 x float> %__A) {
; SKX-LABEL: test_mm256_mask_extractf32x4_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %ymm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -809,7 +809,7 @@ entry:
define <4 x float> @test_mm256_maskz_extractf32x4_ps(i8 %__U, <8 x float> %__A) {
; SKX-LABEL: test_mm256_maskz_extractf32x4_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x4 $1, %ymm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
@@ -824,7 +824,7 @@ entry:
define <2 x i64> @test_mm256_mask_extracti32x4_epi32(<2 x i64> %__W, i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_mask_extracti32x4_epi32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti32x4 $1, %ymm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -842,7 +842,7 @@ entry:
define <2 x i64> @test_mm256_maskz_extracti32x4_epi32(i8 %__U, <4 x i64> %__A) {
; SKX-LABEL: test_mm256_maskz_extracti32x4_epi32:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextracti32x4 $1, %ymm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
@@ -859,7 +859,7 @@ entry:
define <8 x float> @test_mm512_mask_extractf32x8_ps(<8 x float> %__W, i8 %__U, <16 x float> %__A) {
; SKX-LABEL: test_mm512_mask_extractf32x8_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x8 $1, %zmm1, %ymm0 {%k1}
; SKX-NEXT: retq
@@ -872,7 +872,7 @@ entry:
define <8 x float> @test_mm512_maskz_extractf32x8_ps(i8 %__U, <16 x float> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf32x8_ps:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
; SKX-NEXT: retq
@@ -885,7 +885,7 @@ entry:
define <2 x double> @test_mm512_mask_extractf64x2_pd(<2 x double> %__W, i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_mask_extractf64x2_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $3, %zmm1, %xmm0 {%k1}
; SKX-NEXT: vzeroupper
@@ -900,7 +900,7 @@ entry:
define <2 x double> @test_mm512_maskz_extractf64x2_pd(i8 %__U, <8 x double> %__A) {
; SKX-LABEL: test_mm512_maskz_extractf64x2_pd:
-; SKX: ## BB#0: ## %entry
+; SKX: ## %bb.0: ## %entry
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vextractf64x2 $3, %zmm0, %xmm0 {%k1} {z}
; SKX-NEXT: vzeroupper
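
In this file the checks use the Darwin assembly comment marker "##", and a function whose entry block is named keeps that name appended after the unified reference, so "## BB#0: ## %entry" becomes "## %bb.0: ## %entry". A sketch of that form, assuming an AVX-capable Darwin triple (the function body and store are illustrative, not from this commit):

; Hypothetical test sketch: named entry block, Darwin "##" comment marker.
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s
define void @store_lo(float* %addr, <4 x float> %a) {
; CHECK-LABEL: store_lo:
; CHECK:       ## %bb.0: ## %entry
; CHECK:         vmovups %xmm0, (%rdi)
; CHECK:         retq
entry:
  store <4 x float> %a, <4 x float>* %addr, align 1
  ret void
}
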
Modified: llvm/trunk/test/CodeGen/X86/avx512-fma-commute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-fma-commute.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-fma-commute.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-fma-commute.ll Mon Dec 4 09:18:51 2017
@@ -8,7 +8,7 @@ declare <2 x double> @llvm.x86.avx512.ma
define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss_load0(<4 x float>* %x0ptr, <4 x float> %x1, <4 x float> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd231ss (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -19,7 +19,7 @@ define <4 x float> @test_int_x86_avx512_
define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss_load1(<4 x float> %x0, <4 x float>* %x1ptr, <4 x float> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd231ss (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -30,7 +30,7 @@ define <4 x float> @test_int_x86_avx512_
define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd_load0(<2 x double>* %x0ptr, <2 x double> %x1, <2 x double> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd231sd (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -41,7 +41,7 @@ define <2 x double> @test_int_x86_avx512
define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd_load1(<2 x double> %x0, <2 x double>* %x1ptr, <2 x double> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd231sd (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -52,7 +52,7 @@ define <2 x double> @test_int_x86_avx512
define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss_load0(<4 x float>* %x0ptr, <4 x float> %x1, <4 x float> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmsub231ss (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -63,7 +63,7 @@ define <4 x float> @test_int_x86_avx512_
define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss_load1(<4 x float> %x0, <4 x float>* %x1ptr, <4 x float> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmsub231ss (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -74,7 +74,7 @@ define <4 x float> @test_int_x86_avx512_
define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd_load0(<2 x double>* %x0ptr, <2 x double> %x1, <2 x double> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd_load0:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmsub231sd (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -85,7 +85,7 @@ define <2 x double> @test_int_x86_avx512
define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd_load1(<2 x double> %x0, <2 x double>* %x1ptr, <2 x double> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd_load1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmsub231sd (%rdi), %xmm0, %xmm1
; CHECK-NEXT: vmovapd %xmm1, %xmm0
; CHECK-NEXT: retq
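
This commit rewrites the expectations outright, but a downstream test that must pass both before and after the rename could accept either spelling through FileCheck's {{...}} regex blocks. A transitional sketch, reusing the vfmadd intrinsic signature and output visible in the next file's hunks (the function name and RUN line are assumptions):

; Hypothetical transitional check: the brace regex matches "%bb.0" or "BB#0".
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck %s
declare <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
define <16 x float> @fmadd_transitional(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: fmadd_transitional:
; CHECK: ## {{%bb\.0|BB#0}}:
; CHECK: vfmadd213ps %zmm2, %zmm1, %zmm0
; CHECK: retq
  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4)
  ret <16 x float> %res
}
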
Modified: llvm/trunk/test/CodeGen/X86/avx512-fma-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-fma-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-fma-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-fma-intrinsics.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <16 x float> @test_x86_vfnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfnmadd_ps_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
@@ -16,7 +16,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float> @test_mask_vfnmadd_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_vfnmadd_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmadd132ps %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -26,7 +26,7 @@ define <16 x float> @test_mask_vfnmadd_p
define <8 x double> @test_x86_vfnmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfnmadd_pd_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmadd213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
@@ -36,7 +36,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double> @test_mask_vfnmadd_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmadd_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -46,7 +46,7 @@ define <8 x double> @test_mask_vfnmadd_p
define <16 x float> @test_x86_vfnmsubps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfnmsubps_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
@@ -56,7 +56,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float> @test_mask_vfnmsub_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_vfnmsub_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132ps %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -66,7 +66,7 @@ define <16 x float> @test_mask_vfnmsub_p
define <8 x double> @test_x86_vfnmsubpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfnmsubpd_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
@@ -76,7 +76,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double> @test_mask_vfnmsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfnmsub_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -86,7 +86,7 @@ define <8 x double> @test_mask_vfnmsub_p
define <16 x float> @test_x86_vfmaddsubps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_x86_vfmaddsubps_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmaddsub213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
@@ -95,7 +95,7 @@ define <16 x float> @test_x86_vfmaddsubp
define <16 x float> @test_mask_fmaddsub_ps(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
; CHECK-LABEL: test_mask_fmaddsub_ps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmaddsub132ps %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -107,7 +107,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <8 x double> @test_x86_vfmaddsubpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_x86_vfmaddsubpd_z:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmaddsub213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
@@ -117,7 +117,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double> @test_mask_vfmaddsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmaddsub_pd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmaddsub132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -127,7 +127,7 @@ define <8 x double> @test_mask_vfmaddsub
define <8 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm3
; CHECK-NEXT: vfmaddsub132pd %zmm1, %zmm2, %zmm3 {%k1}
@@ -144,7 +144,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm2, %zmm3
; CHECK-NEXT: vfmaddsub231pd %zmm1, %zmm0, %zmm3 {%k1}
@@ -161,7 +161,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm1, %zmm3
; CHECK-NEXT: vfmaddsub213pd %zmm2, %zmm0, %zmm3 {%k1} {z}
@@ -176,7 +176,7 @@ define <8 x double>@test_int_x86_avx512_
define <16 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vfmaddsub132ps %zmm1, %zmm2, %zmm3 {%k1}
@@ -193,7 +193,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm2, %zmm3
; CHECK-NEXT: vfmaddsub231ps %zmm1, %zmm0, %zmm3 {%k1}
@@ -210,7 +210,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
; CHECK-NEXT: vfmaddsub213ps %zmm2, %zmm0, %zmm3 {%k1} {z}
@@ -227,7 +227,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm2, %zmm3
; CHECK-NEXT: vfmsubadd231pd %zmm1, %zmm0, %zmm3 {%k1}
@@ -244,7 +244,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm2, %zmm3
; CHECK-NEXT: vfmsubadd231ps %zmm1, %zmm0, %zmm3 {%k1}
@@ -259,7 +259,7 @@ define <16 x float>@test_int_x86_avx512_
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -269,7 +269,7 @@ define <16 x float> @test_mask_round_vfm
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -279,7 +279,7 @@ define <16 x float> @test_mask_round_vfm
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -289,7 +289,7 @@ define <16 x float> @test_mask_round_vfm
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -299,7 +299,7 @@ define <16 x float> @test_mask_round_vfm
define <16 x float> @test_mask_round_vfmadd512_ps_rrb_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132ps %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -309,7 +309,7 @@ define <16 x float> @test_mask_round_vfm
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 0) nounwind
@@ -318,7 +318,7 @@ define <16 x float> @test_mask_round_vfm
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {rd-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 1) nounwind
@@ -327,7 +327,7 @@ define <16 x float> @test_mask_round_vfm
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {ru-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 2) nounwind
@@ -336,7 +336,7 @@ define <16 x float> @test_mask_round_vfm
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps {rz-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 3) nounwind
@@ -345,7 +345,7 @@ define <16 x float> @test_mask_round_vfm
define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
@@ -356,7 +356,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double>@test_int_x86_avx512_mask3_vfmsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm2, %zmm3
; CHECK-NEXT: vfmsub231pd %zmm1, %zmm0, %zmm3 {%k1}
@@ -373,7 +373,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_mask3_vfmsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm2, %zmm3
; CHECK-NEXT: vfmsub231ps %zmm1, %zmm0, %zmm3 {%k1}
@@ -388,7 +388,7 @@ define <16 x float>@test_int_x86_avx512_
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -398,7 +398,7 @@ define <8 x double> @test_mask_round_vfm
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -408,7 +408,7 @@ define <8 x double> @test_mask_round_vfm
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -418,7 +418,7 @@ define <8 x double> @test_mask_round_vfm
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -428,7 +428,7 @@ define <8 x double> @test_mask_round_vfm
define <8 x double> @test_mask_round_vfmadd512_pd_rrb_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -438,7 +438,7 @@ define <8 x double> @test_mask_round_vfm
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 0) nounwind
@@ -447,7 +447,7 @@ define <8 x double> @test_mask_round_vfm
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {rd-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 1) nounwind
@@ -456,7 +456,7 @@ define <8 x double> @test_mask_round_vfm
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {ru-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 2) nounwind
@@ -465,7 +465,7 @@ define <8 x double> @test_mask_round_vfm
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd {rz-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 3) nounwind
@@ -474,7 +474,7 @@ define <8 x double> @test_mask_round_vfm
define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
@@ -483,7 +483,7 @@ define <8 x double> @test_mask_round_vfm
define <8 x double>@test_int_x86_avx512_mask_vfmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm3
; CHECK-NEXT: vfmadd132pd %zmm1, %zmm2, %zmm3 {%k1}
@@ -500,7 +500,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double>@test_int_x86_avx512_mask3_vfmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm2, %zmm3
; CHECK-NEXT: vfmadd231pd %zmm1, %zmm0, %zmm3 {%k1}
@@ -517,7 +517,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double>@test_int_x86_avx512_maskz_vfmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm1, %zmm3
; CHECK-NEXT: vfmadd213pd %zmm2, %zmm0, %zmm3 {%k1} {z}
@@ -532,7 +532,7 @@ define <8 x double>@test_int_x86_avx512_
define <16 x float>@test_int_x86_avx512_mask_vfmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vfmadd132ps %zmm1, %zmm2, %zmm3 {%k1}
@@ -549,7 +549,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_mask3_vfmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm2, %zmm3
; CHECK-NEXT: vfmadd231ps %zmm1, %zmm0, %zmm3 {%k1}
@@ -566,7 +566,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_maskz_vfmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
; CHECK-NEXT: vfmadd213ps %zmm2, %zmm0, %zmm3 {%k1} {z}
@@ -582,7 +582,7 @@ define <16 x float>@test_int_x86_avx512_
define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -592,7 +592,7 @@ define <8 x double> @test_mask_round_vfn
define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -602,7 +602,7 @@ define <8 x double> @test_mask_round_vfn
define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -612,7 +612,7 @@ define <8 x double> @test_mask_round_vfn
define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -622,7 +622,7 @@ define <8 x double> @test_mask_round_vfn
define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
@@ -632,7 +632,7 @@ define <8 x double> @test_mask_round_vfn
define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rne:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 0) nounwind
@@ -641,7 +641,7 @@ define <8 x double> @test_mask_round_vfn
define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtn:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd {rd-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 1) nounwind
@@ -650,7 +650,7 @@ define <8 x double> @test_mask_round_vfn
define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtp:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd {ru-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 2) nounwind
@@ -659,7 +659,7 @@ define <8 x double> @test_mask_round_vfn
define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtz:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd {rz-sae}, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 3) nounwind
@@ -668,7 +668,7 @@ define <8 x double> @test_mask_round_vfn
define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_current:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
@@ -677,7 +677,7 @@ define <8 x double> @test_mask_round_vfn
define <8 x double>@test_int_x86_avx512_mask_vfnmsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm3
; CHECK-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm3 {%k1}
@@ -694,7 +694,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm2, %zmm3
; CHECK-NEXT: vfnmsub231pd %zmm1, %zmm0, %zmm3 {%k1}
@@ -709,7 +709,7 @@ define <8 x double>@test_int_x86_avx512_
define <16 x float>@test_int_x86_avx512_mask_vfnmsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vfnmsub132ps %zmm1, %zmm2, %zmm3 {%k1}
@@ -726,7 +726,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm2, %zmm3
; CHECK-NEXT: vfnmsub231ps %zmm1, %zmm0, %zmm3 {%k1}
@@ -741,7 +741,7 @@ define <16 x float>@test_int_x86_avx512_
define <8 x double>@test_int_x86_avx512_mask_vfnmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm3
; CHECK-NEXT: vfnmadd132pd %zmm1, %zmm2, %zmm3 {%k1}
@@ -756,7 +756,7 @@ define <8 x double>@test_int_x86_avx512_
define <16 x float>@test_int_x86_avx512_mask_vfnmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm3
; CHECK-NEXT: vfnmadd132ps %zmm1, %zmm2, %zmm3 {%k1}
Modified: llvm/trunk/test/CodeGen/X86/avx512-fma.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-fma.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-fma.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-fma.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define <16 x float> @test_x86_fmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; ALL-LABEL: test_x86_fmadd_ps_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -14,7 +14,7 @@ define <16 x float> @test_x86_fmadd_ps_z
define <16 x float> @test_x86_fmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; ALL-LABEL: test_x86_fmsub_ps_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -24,7 +24,7 @@ define <16 x float> @test_x86_fmsub_ps_z
define <16 x float> @test_x86_fnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; ALL-LABEL: test_x86_fnmadd_ps_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -34,7 +34,7 @@ define <16 x float> @test_x86_fnmadd_ps_
define <16 x float> @test_x86_fnmsub_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
; ALL-LABEL: test_x86_fnmsub_ps_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <16 x float> %a0, %a1
@@ -48,7 +48,7 @@ define <16 x float> @test_x86_fnmsub_ps_
define <8 x double> @test_x86_fmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; ALL-LABEL: test_x86_fmadd_pd_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmadd213pd %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <8 x double> %a0, %a1
@@ -58,7 +58,7 @@ define <8 x double> @test_x86_fmadd_pd_z
define <8 x double> @test_x86_fmsub_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
; ALL-LABEL: test_x86_fmsub_pd_z:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmsub213pd %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
%x = fmul <8 x double> %a0, %a1
@@ -68,7 +68,7 @@ define <8 x double> @test_x86_fmsub_pd_z
define double @test_x86_fmsub_213(double %a0, double %a1, double %a2) {
; ALL-LABEL: test_x86_fmsub_213:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0
; ALL-NEXT: retq
%x = fmul double %a0, %a1
@@ -78,7 +78,7 @@ define double @test_x86_fmsub_213(double
define double @test_x86_fmsub_213_m(double %a0, double %a1, double * %a2_ptr) {
; ALL-LABEL: test_x86_fmsub_213_m:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmsub213sd (%rdi), %xmm1, %xmm0
; ALL-NEXT: retq
%a2 = load double , double *%a2_ptr
@@ -89,7 +89,7 @@ define double @test_x86_fmsub_213_m(doub
define double @test_x86_fmsub_231_m(double %a0, double %a1, double * %a2_ptr) {
; ALL-LABEL: test_x86_fmsub_231_m:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmsub132sd (%rdi), %xmm1, %xmm0
; ALL-NEXT: retq
%a2 = load double , double *%a2_ptr
@@ -100,7 +100,7 @@ define double @test_x86_fmsub_231_m(doub
define <16 x float> @test231_br(<16 x float> %a1, <16 x float> %a2) nounwind {
; ALL-LABEL: test231_br:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmadd132ps {{.*}}(%rip){1to16}, %zmm1, %zmm0
; ALL-NEXT: retq
%b1 = fmul <16 x float> %a1, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
@@ -110,7 +110,7 @@ define <16 x float> @test231_br(<16 x fl
define <16 x float> @test213_br(<16 x float> %a1, <16 x float> %a2) nounwind {
; ALL-LABEL: test213_br:
-; ALL: ## BB#0:
+; ALL: ## %bb.0:
; ALL-NEXT: vfmadd213ps {{.*}}(%rip){1to16}, %zmm1, %zmm0
; ALL-NEXT: retq
%b1 = fmul <16 x float> %a1, %a2
@@ -121,7 +121,7 @@ define <16 x float> @test213_br(<16 x fl
;mask (a*c+b , a)
define <16 x float> @test_x86_fmadd132_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
; KNL-LABEL: test_x86_fmadd132_ps:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm2
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -129,7 +129,7 @@ define <16 x float> @test_x86_fmadd132_p
; KNL-NEXT: retq
;
; SKX-LABEL: test_x86_fmadd132_ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2
; SKX-NEXT: vpmovb2m %xmm2, %k1
; SKX-NEXT: vfmadd132ps (%rdi), %zmm1, %zmm0 {%k1}
@@ -144,7 +144,7 @@ define <16 x float> @test_x86_fmadd132_p
;mask (a*c+b , b)
define <16 x float> @test_x86_fmadd231_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
; KNL-LABEL: test_x86_fmadd231_ps:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm2
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -153,7 +153,7 @@ define <16 x float> @test_x86_fmadd231_p
; KNL-NEXT: retq
;
; SKX-LABEL: test_x86_fmadd231_ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2
; SKX-NEXT: vpmovb2m %xmm2, %k1
; SKX-NEXT: vfmadd231ps (%rdi), %zmm0, %zmm1 {%k1}
@@ -169,7 +169,7 @@ define <16 x float> @test_x86_fmadd231_p
;mask (b*a+c , b)
define <16 x float> @test_x86_fmadd213_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> *%a2_ptrt, <16 x i1> %mask) {
; KNL-LABEL: test_x86_fmadd213_ps:
-; KNL: ## BB#0:
+; KNL: ## %bb.0:
; KNL-NEXT: vpmovsxbd %xmm2, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm2
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -178,7 +178,7 @@ define <16 x float> @test_x86_fmadd213_p
; KNL-NEXT: retq
;
; SKX-LABEL: test_x86_fmadd213_ps:
-; SKX: ## BB#0:
+; SKX: ## %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2
; SKX-NEXT: vpmovb2m %xmm2, %k1
; SKX-NEXT: vfmadd213ps (%rdi), %zmm0, %zmm1 {%k1}
Modified: llvm/trunk/test/CodeGen/X86/avx512-fsel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-fsel.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-fsel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-fsel.ll Mon Dec 4 09:18:51 2017
@@ -6,7 +6,7 @@ target triple = "x86_64-apple-macosx10.1
define i32 @test(float %a, float %b) {
; CHECK-LABEL: test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vucomiss %xmm1, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll Mon Dec 4 09:18:51 2017
@@ -13,7 +13,7 @@ declare void @llvm.x86.avx512.scatter.qp
define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherdps (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -29,7 +29,7 @@ define void @gather_mask_dps(<16 x i32>
define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dpd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k2}
@@ -45,7 +45,7 @@ define void @gather_mask_dpd(<8 x i32> %
define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherqps (%rsi,%zmm0,4), %ymm1 {%k2}
@@ -61,7 +61,7 @@ define void @gather_mask_qps(<8 x i64> %
define void @gather_mask_qpd(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qpd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -89,7 +89,7 @@ declare void @llvm.x86.avx512.scatter.qp
define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherdd (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -105,7 +105,7 @@ define void @gather_mask_dd(<16 x i32> %
define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qd:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherqd (%rsi,%zmm0,4), %ymm1 {%k2}
@@ -121,7 +121,7 @@ define void @gather_mask_qd(<8 x i64> %i
define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_qq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherqq (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -137,7 +137,7 @@ define void @gather_mask_qq(<8 x i64> %i
define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_mask_dq:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: kmovq %k1, %k2
; CHECK-NEXT: vpgatherdq (%rsi,%ymm0,4), %zmm1 {%k2}
@@ -153,7 +153,7 @@ define void @gather_mask_dq(<8 x i32> %i
define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
; CHECK-LABEL: gather_mask_dpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, (%rdx)
@@ -166,7 +166,7 @@ define void @gather_mask_dpd_execdomain(
define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
; CHECK-LABEL: gather_mask_qpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovapd %zmm1, (%rdx)
@@ -179,7 +179,7 @@ define void @gather_mask_qpd_execdomain(
define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base) {
; CHECK-LABEL: gather_mask_dps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherdps (%rsi,%zmm0,4), %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
@@ -190,7 +190,7 @@ define <16 x float> @gather_mask_dps_exe
define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base) {
; CHECK-LABEL: gather_mask_qps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vgatherqps (%rsi,%zmm0,4), %ymm1 {%k1}
; CHECK-NEXT: vmovaps %ymm1, %ymm0
@@ -201,7 +201,7 @@ define <8 x float> @gather_mask_qps_exec
define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_dpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vscatterdpd %zmm1, (%rcx,%ymm0,4) {%k1}
@@ -214,7 +214,7 @@ define void @scatter_mask_dpd_execdomain
define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_qpd_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovapd (%rdi), %zmm1
; CHECK-NEXT: vscatterqpd %zmm1, (%rcx,%zmm0,4) {%k1}
@@ -227,7 +227,7 @@ define void @scatter_mask_qpd_execdomain
define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i16 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_dps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovaps (%rdi), %zmm1
; CHECK-NEXT: vscatterdps %zmm1, (%rcx,%zmm0,4) {%k1}
@@ -240,7 +240,7 @@ define void @scatter_mask_dps_execdomain
define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %mask, i8* %base, i8* %stbuf) {
; CHECK-LABEL: scatter_mask_qps_execdomain:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovaps (%rdi), %ymm1
; CHECK-NEXT: vscatterqps %ymm1, (%rcx,%zmm0,4) {%k1}
@@ -253,7 +253,7 @@ define void @scatter_mask_qps_execdomain
define void @gather_qps(<8 x i64> %ind, <8 x float> %src, i8* %base, i8* %stbuf) {
; CHECK-LABEL: gather_qps:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
@@ -272,7 +272,7 @@ declare void @llvm.x86.avx512.gatherpf.
declare void @llvm.x86.avx512.scatterpf.qps.512(i8, <8 x i64>, i8* , i32, i32);
define void @prefetch(<8 x i64> %ind, i8* %base) {
; CHECK-LABEL: prefetch:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vgatherpf0qps (%rdi,%zmm0,4) {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k1
@@ -296,7 +296,7 @@ declare <2 x double> @llvm.x86.avx512.ga
define <2 x double>@test_int_x86_avx512_gather3div2_df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqpd (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -314,7 +314,7 @@ declare <2 x i64> @llvm.x86.avx512.gathe
define <2 x i64>@test_int_x86_avx512_gather3div2_di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherqq (%rdi,%xmm1,8), %xmm0 {%k1}
; CHECK-NEXT: vpaddq %xmm0, %xmm0, %xmm0
@@ -329,7 +329,7 @@ declare <4 x double> @llvm.x86.avx512.ga
define <4 x double>@test_int_x86_avx512_gather3div4_df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqpd (%rdi,%ymm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -347,7 +347,7 @@ declare <4 x i64> @llvm.x86.avx512.gathe
define <4 x i64>@test_int_x86_avx512_gather3div4_di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherqq (%rdi,%ymm1,8), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -365,7 +365,7 @@ declare <4 x float> @llvm.x86.avx512.gat
define <4 x float>@test_int_x86_avx512_gather3div4_sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqps (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -383,7 +383,7 @@ declare <4 x i32> @llvm.x86.avx512.gathe
define <4 x i32>@test_int_x86_avx512_gather3div4_si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -401,7 +401,7 @@ declare <4 x float> @llvm.x86.avx512.gat
define <4 x float>@test_int_x86_avx512_gather3div8_sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherqps (%rdi,%ymm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -420,7 +420,7 @@ declare <4 x i32> @llvm.x86.avx512.gathe
define <4 x i32>@test_int_x86_avx512_gather3div8_si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3div8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm2
; CHECK-NEXT: kmovq %k1, %k2
@@ -439,7 +439,7 @@ declare <2 x double> @llvm.x86.avx512.ga
define <2 x double>@test_int_x86_avx512_gather3siv2_df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -457,7 +457,7 @@ declare <2 x i64> @llvm.x86.avx512.gathe
define <2 x i64>@test_int_x86_avx512_gather3siv2_di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1}
; CHECK-NEXT: vpaddq %xmm0, %xmm0, %xmm0
@@ -472,7 +472,7 @@ declare <4 x double> @llvm.x86.avx512.ga
define <4 x double>@test_int_x86_avx512_gather3siv4_df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdpd (%rdi,%xmm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -490,7 +490,7 @@ declare <4 x i64> @llvm.x86.avx512.gathe
define <4 x i64>@test_int_x86_avx512_gather3siv4_di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpgatherdq (%rdi,%xmm1,8), %ymm0 {%k1}
; CHECK-NEXT: vpaddq %ymm0, %ymm0, %ymm0
@@ -505,7 +505,7 @@ declare <4 x float> @llvm.x86.avx512.gat
define <4 x float>@test_int_x86_avx512_gather3siv4_sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdps (%rdi,%xmm1,4), %xmm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -523,7 +523,7 @@ declare <4 x i32> @llvm.x86.avx512.gathe
define <4 x i32>@test_int_x86_avx512_gather3siv4_si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -541,7 +541,7 @@ declare <8 x float> @llvm.x86.avx512.gat
define <8 x float>@test_int_x86_avx512_gather3siv8_sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vgatherdps (%rdi,%ymm1,4), %ymm0 {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -559,7 +559,7 @@ declare <8 x i32> @llvm.x86.avx512.gathe
define <8 x i32>@test_int_x86_avx512_gather3siv8_si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_gather3siv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm2
; CHECK-NEXT: kmovq %k1, %k2
@@ -577,7 +577,7 @@ declare void @llvm.x86.avx512.scatterdiv
define void@test_int_x86_avx512_scatterdiv2_df(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vscatterqpd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -592,7 +592,7 @@ declare void @llvm.x86.avx512.scatterdiv
define void@test_int_x86_avx512_scatterdiv2_di(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqq %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -607,7 +607,7 @@ declare void @llvm.x86.avx512.scatterdiv
define void@test_int_x86_avx512_scatterdiv4_df(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqpd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -623,7 +623,7 @@ declare void @llvm.x86.avx512.scatterdiv
define void@test_int_x86_avx512_scatterdiv4_di(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqq %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -639,7 +639,7 @@ declare void @llvm.x86.avx512.scatterdiv
define void@test_int_x86_avx512_scatterdiv4_sf(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqps %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -654,7 +654,7 @@ declare void @llvm.x86.avx512.scatterdiv
define void@test_int_x86_avx512_scatterdiv4_si(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterqd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -669,7 +669,7 @@ declare void @llvm.x86.avx512.scatterdiv
define void@test_int_x86_avx512_scatterdiv8_sf(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterqps %xmm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -685,7 +685,7 @@ declare void @llvm.x86.avx512.scatterdiv
define void@test_int_x86_avx512_scatterdiv8_si(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterqd %xmm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -701,7 +701,7 @@ declare void @llvm.x86.avx512.scattersiv
define void@test_int_x86_avx512_scattersiv2_df(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv2_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vscatterdpd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -716,7 +716,7 @@ declare void @llvm.x86.avx512.scattersiv
define void@test_int_x86_avx512_scattersiv2_di(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv2_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterdq %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -731,7 +731,7 @@ declare void @llvm.x86.avx512.scattersiv
define void@test_int_x86_avx512_scattersiv4_df(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x double> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_df:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdpd %ymm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -747,7 +747,7 @@ declare void @llvm.x86.avx512.scattersiv
define void@test_int_x86_avx512_scattersiv4_di(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i64> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_di:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: kxnorw %k0, %k0, %k2
; CHECK-NEXT: vpscatterdq %ymm1, (%rdi,%xmm0,2) {%k2}
@@ -763,7 +763,7 @@ declare void @llvm.x86.avx512.scattersiv
define void@test_int_x86_avx512_scattersiv4_sf(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdps %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -778,7 +778,7 @@ declare void @llvm.x86.avx512.scattersiv
define void@test_int_x86_avx512_scattersiv4_si(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv4_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterdd %xmm1, (%rdi,%xmm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -793,7 +793,7 @@ declare void @llvm.x86.avx512.scattersiv
define void@test_int_x86_avx512_scattersiv8_sf(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x float> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv8_sf:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vscatterdps %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -809,7 +809,7 @@ declare void @llvm.x86.avx512.scattersiv
define void@test_int_x86_avx512_scattersiv8_si(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x i32> %x3) {
; CHECK-LABEL: test_int_x86_avx512_scattersiv8_si:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxnorw %k0, %k0, %k1
@@ -823,7 +823,7 @@ define void at test_int_x86_avx512_scatters
define void @scatter_mask_test(i8* %x0, <8 x i32> %x2, <8 x i32> %x3) {
; CHECK-LABEL: scatter_mask_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k1
@@ -845,7 +845,7 @@ define void @scatter_mask_test(i8* %x0,
define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %base) {
; CHECK-LABEL: gather_mask_test:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kxnorw %k0, %k0, %k1
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm2 {%k1}
Modified: llvm/trunk/test/CodeGen/X86/avx512-gfni-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-gfni-intrinsics.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-gfni-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-gfni-intrinsics.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8)
define <16 x i8> @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: test_vgf2p8affineinvqb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineinvqb $3, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xf9,0xcf,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineinvqb $3, %xmm1, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0x89,0xcf,0xe1,0x03]
@@ -24,7 +24,7 @@ define <16 x i8> @test_vgf2p8affineinvqb
declare <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8>, <32 x i8>, i8)
define <32 x i8> @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) {
; CHECK-LABEL: test_vgf2p8affineinvqb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineinvqb $3, %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xfd,0xcf,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineinvqb $3, %ymm1, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0xcf,0xe1,0x03]
@@ -44,7 +44,7 @@ define <32 x i8> @test_vgf2p8affineinvqb
declare <64 x i8> @llvm.x86.vgf2p8affineinvqb.512(<64 x i8>, <64 x i8>, i8)
define <64 x i8> @test_vgf2p8affineinvqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: test_vgf2p8affineinvqb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineinvqb $3, %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf3,0xfd,0x48,0xcf,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineinvqb $3, %zmm1, %zmm0, %zmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xc9,0xcf,0xe1,0x03]
@@ -64,7 +64,7 @@ define <64 x i8> @test_vgf2p8affineinvqb
declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8)
define <16 x i8> @test_vgf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: test_vgf2p8affineqb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineqb $3, %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xf9,0xce,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineqb $3, %xmm1, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0x89,0xce,0xe1,0x03]
@@ -84,7 +84,7 @@ define <16 x i8> @test_vgf2p8affineqb_12
declare <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i8)
define <32 x i8> @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) {
; CHECK-LABEL: test_vgf2p8affineqb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineqb $3, %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0xfd,0xce,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineqb $3, %ymm1, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0xce,0xe1,0x03]
@@ -104,7 +104,7 @@ define <32 x i8> @test_vgf2p8affineqb_25
declare <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8>, <64 x i8>, i8)
define <64 x i8> @test_vgf2p8affineqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: test_vgf2p8affineqb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8affineqb $3, %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf3,0xfd,0x48,0xce,0xd9,0x03]
; CHECK-NEXT: vgf2p8affineqb $3, %zmm1, %zmm0, %zmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xc9,0xce,0xe1,0x03]
@@ -124,7 +124,7 @@ define <64 x i8> @test_vgf2p8affineqb_51
declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>)
define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) {
; CHECK-LABEL: test_vgf2p8mulb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8mulb %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xcf,0xd9]
; CHECK-NEXT: vgf2p8mulb %xmm1, %xmm0, %xmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0xcf,0xe1]
@@ -144,7 +144,7 @@ define <16 x i8> @test_vgf2p8mulb_128(<1
declare <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8>, <32 x i8>)
define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) {
; CHECK-LABEL: test_vgf2p8mulb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8mulb %ymm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xcf,0xd9]
; CHECK-NEXT: vgf2p8mulb %ymm1, %ymm0, %ymm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0xcf,0xe1]
@@ -164,7 +164,7 @@ define <32 x i8> @test_vgf2p8mulb_256(<3
declare <64 x i8> @llvm.x86.vgf2p8mulb.512(<64 x i8>, <64 x i8>)
define <64 x i8> @test_vgf2p8mulb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) {
; CHECK-LABEL: test_vgf2p8mulb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
; CHECK-NEXT: vgf2p8mulb %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0xcf,0xd9]
; CHECK-NEXT: vgf2p8mulb %zmm1, %zmm0, %zmm4 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0xcf,0xe1]
Modified: llvm/trunk/test/CodeGen/X86/avx512-hadd-hsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-hadd-hsub.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-hadd-hsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-hadd-hsub.ll Mon Dec 4 09:18:51 2017
@@ -4,7 +4,7 @@
define i32 @hadd_16(<16 x i32> %x225) {
; KNL-LABEL: hadd_16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; KNL-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -13,7 +13,7 @@ define i32 @hadd_16(<16 x i32> %x225) {
; KNL-NEXT: retq
;
; SKX-LABEL: hadd_16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -31,7 +31,7 @@ define i32 @hadd_16(<16 x i32> %x225) {
define i32 @hsub_16(<16 x i32> %x225) {
; KNL-LABEL: hsub_16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; KNL-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -40,7 +40,7 @@ define i32 @hsub_16(<16 x i32> %x225) {
; KNL-NEXT: retq
;
; SKX-LABEL: hsub_16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
@@ -58,7 +58,7 @@ define i32 @hsub_16(<16 x i32> %x225) {
define float @fhadd_16(<16 x float> %x225) {
; KNL-LABEL: fhadd_16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
@@ -67,7 +67,7 @@ define float @fhadd_16(<16 x float> %x22
; KNL-NEXT: retq
;
; SKX-LABEL: fhadd_16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
@@ -85,7 +85,7 @@ define float @fhadd_16(<16 x float> %x22
define float @fhsub_16(<16 x float> %x225) {
; KNL-LABEL: fhsub_16:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
@@ -94,7 +94,7 @@ define float @fhsub_16(<16 x float> %x22
; KNL-NEXT: retq
;
; SKX-LABEL: fhsub_16:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
@@ -112,14 +112,14 @@ define float @fhsub_16(<16 x float> %x22
define <16 x i32> @hadd_16_3(<16 x i32> %x225, <16 x i32> %x227) {
; KNL-LABEL: hadd_16_3:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; KNL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; KNL-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: hadd_16_3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; SKX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; SKX-NEXT: vpaddd %zmm0, %zmm2, %zmm0
@@ -135,14 +135,14 @@ define <16 x i32> @hadd_16_3(<16 x i32>
define <16 x float> @fhadd_16_3(<16 x float> %x225, <16 x float> %x227) {
; KNL-LABEL: fhadd_16_3:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; KNL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; KNL-NEXT: vaddps %zmm0, %zmm2, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fhadd_16_3:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; SKX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
; SKX-NEXT: vaddps %zmm0, %zmm2, %zmm0
@@ -157,14 +157,14 @@ define <16 x float> @fhadd_16_3(<16 x fl
define <8 x double> @fhadd_16_4(<8 x double> %x225, <8 x double> %x227) {
; KNL-LABEL: fhadd_16_4:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; KNL-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fhadd_16_4:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; SKX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -177,7 +177,7 @@ define <8 x double> @fhadd_16_4(<8 x dou
define <4 x double> @fadd_noundef_low(<8 x double> %x225, <8 x double> %x227) {
; KNL-LABEL: fadd_noundef_low:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -185,7 +185,7 @@ define <4 x double> @fadd_noundef_low(<8
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_low:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -200,7 +200,7 @@ define <4 x double> @fadd_noundef_low(<8
define <4 x double> @fadd_noundef_high(<8 x double> %x225, <8 x double> %x227) {
; KNL-LABEL: fadd_noundef_high:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -208,7 +208,7 @@ define <4 x double> @fadd_noundef_high(<
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_high:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -224,7 +224,7 @@ define <4 x double> @fadd_noundef_high(<
define <8 x i32> @hadd_16_3_sv(<16 x i32> %x225, <16 x i32> %x227) {
; KNL-LABEL: hadd_16_3_sv:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; KNL-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; KNL-NEXT: vpaddd %zmm0, %zmm2, %zmm0
@@ -232,7 +232,7 @@ define <8 x i32> @hadd_16_3_sv(<16 x i32
; KNL-NEXT: retq
;
; SKX-LABEL: hadd_16_3_sv:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; SKX-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; SKX-NEXT: vpaddd %zmm0, %zmm2, %zmm0
@@ -251,7 +251,7 @@ define <8 x i32> @hadd_16_3_sv(<16 x i32
define double @fadd_noundef_eel(<8 x double> %x225, <8 x double> %x227) {
; KNL-LABEL: fadd_noundef_eel:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -259,7 +259,7 @@ define double @fadd_noundef_eel(<8 x dou
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_eel:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
@@ -277,7 +277,7 @@ define double @fadd_noundef_eel(<8 x dou
define double @fsub_noundef_ee (<8 x double> %x225, <8 x double> %x227) {
; KNL-LABEL: fsub_noundef_ee:
-; KNL: # BB#0:
+; KNL: # %bb.0:
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vsubpd %zmm0, %zmm2, %zmm0
@@ -286,7 +286,7 @@ define double @fsub_noundef_ee (<8 x dou
; KNL-NEXT: retq
;
; SKX-LABEL: fsub_noundef_ee:
-; SKX: # BB#0:
+; SKX: # %bb.0:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vsubpd %zmm0, %zmm2, %zmm0
Modified: llvm/trunk/test/CodeGen/X86/avx512-i1test.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-i1test.ll?rev=319665&r1=319664&r2=319665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-i1test.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-i1test.ll Mon Dec 4 09:18:51 2017
@@ -7,11 +7,11 @@ target triple = "x86_64-unknown-linux-gn
define void @func() {
; CHECK-LABEL: func:
-; CHECK: # BB#0: # %L_10
+; CHECK: # %bb.0: # %L_10
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: je .LBB0_1
-; CHECK-NEXT: # BB#4: # %L_30
+; CHECK-NEXT: # %bb.4: # %L_30
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB0_1: # %bb56
; CHECK-NEXT: xorl %eax, %eax
@@ -65,10 +65,10 @@ L_30:
; PR 28175
define i64 @func2(i1 zeroext %i, i32 %j) {
; CHECK-LABEL: func2:
-; CHECK: # BB#0: # %entry
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: je .LBB1_1
-; CHECK-NEXT: # BB#2: # %if.then
+; CHECK-NEXT: # %bb.2: # %if.then
; CHECK-NEXT: jmp bar # TAILCALL
; CHECK-NEXT: .LBB1_1: # %return
; CHECK-NEXT: movzbl %dil, %eax