[llvm] r351521 - [X86] Change avx512-gather-scatter-intrin.ll to use x86_64-unknown-unknown instead of x86_64-apple-darwin. NFC

Craig Topper via llvm-commits llvm-commits@lists.llvm.org
Thu Jan 17 22:06:02 PST 2019


Author: ctopper
Date: Thu Jan 17 22:06:01 2019
New Revision: 351521

URL: http://llvm.org/viewvc/llvm-project?rev=351521&view=rev
Log:
[X86] Change avx512-gather-scatter-intrin.ll to use x86_64-unknown-unknown instead of x86_64-apple-darwin. NFC

Will help with an upcoming patch.

Modified:
    llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll

Modified: llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll?rev=351521&r1=351520&r2=351521&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll Thu Jan 17 22:06:01 2019
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s
 
 define void @gather_mask_dps(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_dps:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    kmovq %k1, %k2
 ; CHECK-NEXT:    vgatherdps (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -20,7 +20,7 @@ define void @gather_mask_dps(<16 x i32>
 
 define void @gather_mask_dpd(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_dpd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    kmovq %k1, %k2
 ; CHECK-NEXT:    vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k2}
@@ -37,7 +37,7 @@ define void @gather_mask_dpd(<8 x i32> %
 
 define void @gather_mask_qps(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_qps:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    kmovq %k1, %k2
 ; CHECK-NEXT:    vgatherqps (%rsi,%zmm0,4), %ymm1 {%k2}
@@ -54,7 +54,7 @@ define void @gather_mask_qps(<8 x i64> %
 
 define void @gather_mask_qpd(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_qpd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    kmovq %k1, %k2
 ; CHECK-NEXT:    vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -74,7 +74,7 @@ define void @gather_mask_qpd(<8 x i64> %
 
 define void @gather_mask_dd(<16 x i32> %ind, <16 x i32> %src, i16 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_dd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    kmovq %k1, %k2
 ; CHECK-NEXT:    vpgatherdd (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -91,7 +91,7 @@ define void @gather_mask_dd(<16 x i32> %
 
 define void @gather_mask_qd(<8 x i64> %ind, <8 x i32> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_qd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    kmovq %k1, %k2
 ; CHECK-NEXT:    vpgatherqd (%rsi,%zmm0,4), %ymm1 {%k2}
@@ -108,7 +108,7 @@ define void @gather_mask_qd(<8 x i64> %i
 
 define void @gather_mask_qq(<8 x i64> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_qq:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    kmovq %k1, %k2
 ; CHECK-NEXT:    vpgatherqq (%rsi,%zmm0,4), %zmm1 {%k2}
@@ -125,7 +125,7 @@ define void @gather_mask_qq(<8 x i64> %i
 
 define void @gather_mask_dq(<8 x i32> %ind, <8 x i64> %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_mask_dq:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    kmovq %k1, %k2
 ; CHECK-NEXT:    vpgatherdq (%rsi,%ymm0,4), %zmm1 {%k2}
@@ -142,7 +142,7 @@ define void @gather_mask_dq(<8 x i32> %i
 
 define void @gather_mask_dpd_execdomain(<8 x i32> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
 ; CHECK-LABEL: gather_mask_dpd_execdomain:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %zmm1, (%rdx)
@@ -156,7 +156,7 @@ define void @gather_mask_dpd_execdomain(
 
 define void @gather_mask_qpd_execdomain(<8 x i64> %ind, <8 x double> %src, i8 %mask, i8* %base, <8 x double>* %stbuf) {
 ; CHECK-LABEL: gather_mask_qpd_execdomain:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vgatherqpd (%rsi,%zmm0,4), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovapd %zmm1, (%rdx)
@@ -170,7 +170,7 @@ define void @gather_mask_qpd_execdomain(
 
 define <16 x float> @gather_mask_dps_execdomain(<16 x i32> %ind, <16 x float> %src, i16 %mask, i8* %base) {
 ; CHECK-LABEL: gather_mask_dps_execdomain:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vgatherdps (%rsi,%zmm0,4), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovaps %zmm1, %zmm0
@@ -182,7 +182,7 @@ define <16 x float> @gather_mask_dps_exe
 
 define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src, i8 %mask, i8* %base) {
 ; CHECK-LABEL: gather_mask_qps_execdomain:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %edi, %k1
 ; CHECK-NEXT:    vgatherqps (%rsi,%zmm0,4), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm0
@@ -194,7 +194,7 @@ define <8 x float> @gather_mask_qps_exec
 
 define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: scatter_mask_dpd_execdomain:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vmovapd (%rdi), %zmm1
 ; CHECK-NEXT:    vscatterdpd %zmm1, (%rcx,%ymm0,4) {%k1}
@@ -208,7 +208,7 @@ define void @scatter_mask_dpd_execdomain
 
 define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf)  {
 ; CHECK-LABEL: scatter_mask_qpd_execdomain:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vmovapd (%rdi), %zmm1
 ; CHECK-NEXT:    vscatterqpd %zmm1, (%rcx,%zmm0,4) {%k1}
@@ -222,7 +222,7 @@ define void @scatter_mask_qpd_execdomain
 
 define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i16 %mask, i8* %base, i8* %stbuf)  {
 ; CHECK-LABEL: scatter_mask_dps_execdomain:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vmovaps (%rdi), %zmm1
 ; CHECK-NEXT:    vscatterdps %zmm1, (%rcx,%zmm0,4) {%k1}
@@ -236,7 +236,7 @@ define void @scatter_mask_dps_execdomain
 
 define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %mask, i8* %base, i8* %stbuf)  {
 ; CHECK-LABEL: scatter_mask_qps_execdomain:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vmovaps (%rdi), %ymm1
 ; CHECK-NEXT:    vscatterqps %ymm1, (%rcx,%zmm0,4) {%k1}
@@ -250,7 +250,7 @@ define void @scatter_mask_qps_execdomain
 
 define void @gather_qps(<8 x i64> %ind, <8 x float> %src, i8* %base, i8* %stbuf) {
 ; CHECK-LABEL: gather_qps:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k2
@@ -269,7 +269,7 @@ declare  void @llvm.x86.avx512.gatherpf.
 declare  void @llvm.x86.avx512.scatterpf.qps.512(i8, <8 x i64>, i8* , i32, i32);
 define void @prefetch(<8 x i64> %ind, i8* %base) {
 ; CHECK-LABEL: prefetch:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vgatherpf0qps (%rdi,%zmm0,4) {%k1}
 ; CHECK-NEXT:    kxorw %k0, %k0, %k1
@@ -291,7 +291,7 @@ define void @prefetch(<8 x i64> %ind, i8
 
 define <2 x double> @test_int_x86_avx512_mask_gather3div2_df(<2 x double> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3div2_df:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherqpd (%rdi,%xmm1,4), %xmm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -309,7 +309,7 @@ define <2 x double> @test_int_x86_avx512
 
 define <2 x i64> @test_int_x86_avx512_mask_gather3div2_di(<2 x i64> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3div2_di:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherqq (%rdi,%xmm1,8), %xmm0 {%k1}
 ; CHECK-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
@@ -326,7 +326,7 @@ define <2 x i64> @test_int_x86_avx512_ma
 
 define <4 x double> @test_int_x86_avx512_mask_gather3div4_df(<4 x double> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3div4_df:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherqpd (%rdi,%ymm1,4), %ymm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -344,7 +344,7 @@ define <4 x double> @test_int_x86_avx512
 
 define <4 x i64> @test_int_x86_avx512_mask_gather3div4_di(<4 x i64> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3div4_di:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherqq (%rdi,%ymm1,8), %ymm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -362,7 +362,7 @@ define <4 x i64> @test_int_x86_avx512_ma
 
 define <4 x float> @test_int_x86_avx512_mask_gather3div4_sf(<4 x float> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3div4_sf:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherqps (%rdi,%xmm1,4), %xmm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -380,7 +380,7 @@ define <4 x float> @test_int_x86_avx512_
 
 define <4 x i32> @test_int_x86_avx512_mask_gather3div4_si(<4 x i32> %x0, i8* %x1, <2 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3div4_si:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpgatherqd (%rdi,%xmm1,4), %xmm2 {%k1}
@@ -398,7 +398,7 @@ define <4 x i32> @test_int_x86_avx512_ma
 
 define <4 x float> @test_int_x86_avx512_mask_gather3div8_sf(<4 x float> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3div8_sf:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherqps (%rdi,%ymm1,4), %xmm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -417,7 +417,7 @@ define <4 x float> @test_int_x86_avx512_
 
 define <4 x i32> @test_int_x86_avx512_mask_gather3div8_si(<4 x i32> %x0, i8* %x1, <4 x i64> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3div8_si:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vmovdqa %xmm0, %xmm2
 ; CHECK-NEXT:    kmovq %k1, %k2
@@ -438,7 +438,7 @@ define <4 x i32> @test_int_x86_avx512_ma
 
 define <2 x double> @test_int_x86_avx512_mask_gather3siv2_df(<2 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv2_df:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,4), %xmm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -456,7 +456,7 @@ define <2 x double> @test_int_x86_avx512
 
 define <2 x i64> @test_int_x86_avx512_mask_gather3siv2_di(<2 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv2_di:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1}
 ; CHECK-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
@@ -473,7 +473,7 @@ define <2 x i64> @test_int_x86_avx512_ma
 
 define <4 x double> @test_int_x86_avx512_mask_gather3siv4_df(<4 x double> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv4_df:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherdpd (%rdi,%xmm1,4), %ymm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -491,7 +491,7 @@ define <4 x double> @test_int_x86_avx512
 
 define <4 x i64> @test_int_x86_avx512_mask_gather3siv4_di(<4 x i64> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv4_di:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpgatherdq (%rdi,%xmm1,8), %ymm0 {%k1}
 ; CHECK-NEXT:    vpaddq %ymm0, %ymm0, %ymm0
@@ -508,7 +508,7 @@ define <4 x i64> @test_int_x86_avx512_ma
 
 define <4 x float> @test_int_x86_avx512_mask_gather3siv4_sf(<4 x float> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv4_sf:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherdps (%rdi,%xmm1,4), %xmm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -526,7 +526,7 @@ define <4 x float> @test_int_x86_avx512_
 
 define <4 x i32> @test_int_x86_avx512_mask_gather3siv4_si(<4 x i32> %x0, i8* %x1, <4 x i32> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv4_si:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vpgatherdd (%rdi,%xmm1,4), %xmm2 {%k1}
@@ -544,7 +544,7 @@ define <4 x i32> @test_int_x86_avx512_ma
 
 define <8 x float> @test_int_x86_avx512_mask_gather3siv8_sf(<8 x float> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv8_sf:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vgatherdps (%rdi,%ymm1,4), %ymm0 {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -561,7 +561,7 @@ define <8 x float> @test_int_x86_avx512_
 
 define <8 x i32> @test_int_x86_avx512_mask_gather3siv8_si(<8 x i32> %x0, i8* %x1, <8 x i32> %x2, i8 %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_mask_gather3siv8_si:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vmovdqa %ymm0, %ymm2
 ; CHECK-NEXT:    kmovq %k1, %k2
@@ -579,7 +579,7 @@ define <8 x i32> @test_int_x86_avx512_ma
 
 define void @test_int_x86_avx512_scatterdiv2_df(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x double> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_df:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k2
 ; CHECK-NEXT:    vscatterqpd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -594,7 +594,7 @@ define void @test_int_x86_avx512_scatterd
 
 define void @test_int_x86_avx512_scatterdiv2_di(i8* %x0, i8 %x1, <2 x i64> %x2, <2 x i64> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv2_di:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpscatterqq %xmm1, (%rdi,%xmm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -609,7 +609,7 @@ define void @test_int_x86_avx512_scatterd
 
 define void @test_int_x86_avx512_scatterdiv4_df(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x double> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_df:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vscatterqpd %ymm1, (%rdi,%ymm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -625,7 +625,7 @@ define void @test_int_x86_avx512_scatterd
 
 define void @test_int_x86_avx512_scatterdiv4_di(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i64> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_di:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpscatterqq %ymm1, (%rdi,%ymm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -641,7 +641,7 @@ define void @test_int_x86_avx512_scatterd
 
 define void @test_int_x86_avx512_scatterdiv4_sf(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x float> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_sf:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vscatterqps %xmm1, (%rdi,%xmm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -656,7 +656,7 @@ define void @test_int_x86_avx512_scatterd
 
 define void @test_int_x86_avx512_scatterdiv4_si(i8* %x0, i8 %x1, <2 x i64> %x2, <4 x i32> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv4_si:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k2
 ; CHECK-NEXT:    vpscatterqd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -671,7 +671,7 @@ define void @test_int_x86_avx512_scatterd
 
 define void @test_int_x86_avx512_scatterdiv8_sf(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x float> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_sf:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vscatterqps %xmm1, (%rdi,%ymm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -687,7 +687,7 @@ define void @test_int_x86_avx512_scatterd
 
 define void @test_int_x86_avx512_scatterdiv8_si(i8* %x0, i8 %x1, <4 x i64> %x2, <4 x i32> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scatterdiv8_si:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpscatterqd %xmm1, (%rdi,%ymm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -703,7 +703,7 @@ define void @test_int_x86_avx512_scatterd
 
 define void @test_int_x86_avx512_scattersiv2_df(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x double> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv2_df:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k2
 ; CHECK-NEXT:    vscatterdpd %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -718,7 +718,7 @@ define void @test_int_x86_avx512_scatters
 
 define void @test_int_x86_avx512_scattersiv2_di(i8* %x0, i8 %x1, <4 x i32> %x2, <2 x i64> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv2_di:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k2
 ; CHECK-NEXT:    vpscatterdq %xmm1, (%rdi,%xmm0,2) {%k2}
@@ -733,7 +733,7 @@ define void @test_int_x86_avx512_scatters
 
 define void @test_int_x86_avx512_scattersiv4_df(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x double> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv4_df:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vscatterdpd %ymm1, (%rdi,%xmm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -749,7 +749,7 @@ define void @test_int_x86_avx512_scatters
 
 define void @test_int_x86_avx512_scattersiv4_di(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i64> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv4_di:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k2
 ; CHECK-NEXT:    vpscatterdq %ymm1, (%rdi,%xmm0,2) {%k2}
@@ -765,7 +765,7 @@ define void @test_int_x86_avx512_scatters
 
 define void @test_int_x86_avx512_scattersiv4_sf(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x float> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv4_sf:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vscatterdps %xmm1, (%rdi,%xmm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -780,7 +780,7 @@ define void @test_int_x86_avx512_scatters
 
 define void @test_int_x86_avx512_scattersiv4_si(i8* %x0, i8 %x1, <4 x i32> %x2, <4 x i32> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv4_si:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpscatterdd %xmm1, (%rdi,%xmm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -795,7 +795,7 @@ define void @test_int_x86_avx512_scatters
 
 define void @test_int_x86_avx512_scattersiv8_sf(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x float> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv8_sf:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vscatterdps %ymm1, (%rdi,%ymm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -810,7 +810,7 @@ define void @test_int_x86_avx512_scatters
 
 define void @test_int_x86_avx512_scattersiv8_si(i8* %x0, i8 %x1, <8 x i32> %x2, <8 x i32> %x3) {
 ; CHECK-LABEL: test_int_x86_avx512_scattersiv8_si:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kmovd %esi, %k1
 ; CHECK-NEXT:    vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
@@ -825,7 +825,7 @@ define void @test_int_x86_avx512_scatters
 
 define void @scatter_mask_test(i8* %x0, <8 x i32> %x2, <8 x i32> %x3) {
 ; CHECK-LABEL: scatter_mask_test:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vpscatterdd %ymm1, (%rdi,%ymm0,2) {%k1}
 ; CHECK-NEXT:    kxorw %k0, %k0, %k1
@@ -847,7 +847,7 @@ define void @scatter_mask_test(i8* %x0,
 
 define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %base) {
 ; CHECK-LABEL: gather_mask_test:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    kxnorw %k0, %k0, %k1
 ; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vgatherdps (%rdi,%zmm0,4), %zmm2 {%k1}




More information about the llvm-commits mailing list