[llvm] f1e3a8f - [X86] avx2-gather.ll - replace X32 checks with X86. NFC.

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 8 04:38:35 PST 2024


Author: Simon Pilgrim
Date: 2024-01-08T12:37:06Z
New Revision: f1e3a8f1eb7877b07d386af1a02cd7578a76c7d1

URL: https://github.com/llvm/llvm-project/commit/f1e3a8f1eb7877b07d386af1a02cd7578a76c7d1
DIFF: https://github.com/llvm/llvm-project/commit/f1e3a8f1eb7877b07d386af1a02cd7578a76c7d1.diff

LOG: [X86] avx2-gather.ll - replace X32 checks with X86. NFC.

We try to use the X32 check prefix for gnux32 triples only.

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/avx2-gather.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/avx2-gather.ll b/llvm/test/CodeGen/X86/avx2-gather.ll
index e02ae09a0981b9..4b77edefa820db 100644
--- a/llvm/test/CodeGen/X86/avx2-gather.ll
+++ b/llvm/test/CodeGen/X86/avx2-gather.ll
@@ -1,18 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
 
 declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, ptr,
                       <4 x i32>, <4 x float>, i8) nounwind readonly
 
 define <4 x float> @test_x86_avx2_gather_d_ps(ptr %a1, <4 x i32> %idx, <4 x float> %mask) {
-; X32-LABEL: test_x86_avx2_gather_d_ps:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; X32-NEXT:    vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
-; X32-NEXT:    vmovaps %xmm2, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: test_x86_avx2_gather_d_ps:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
+; X86-NEXT:    vmovaps %xmm2, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_ps:
 ; X64:       # %bb.0:
@@ -29,13 +29,13 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, ptr,
                       <4 x i32>, <2 x double>, i8) nounwind readonly
 
 define <2 x double> @test_x86_avx2_gather_d_pd(ptr %a1, <4 x i32> %idx, <2 x double> %mask) {
-; X32-LABEL: test_x86_avx2_gather_d_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; X32-NEXT:    vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
-; X32-NEXT:    vmovapd %xmm2, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: test_x86_avx2_gather_d_pd:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
+; X86-NEXT:    vmovapd %xmm2, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_pd:
 ; X64:       # %bb.0:
@@ -52,13 +52,13 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, ptr,
                       <8 x i32>, <8 x float>, i8) nounwind readonly
 
 define <8 x float> @test_x86_avx2_gather_d_ps_256(ptr %a1, <8 x i32> %idx, <8 x float> %mask) {
-; X32-LABEL: test_x86_avx2_gather_d_ps_256:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; X32-NEXT:    vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
-; X32-NEXT:    vmovaps %ymm2, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_x86_avx2_gather_d_ps_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
+; X86-NEXT:    vmovaps %ymm2, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_ps_256:
 ; X64:       # %bb.0:
@@ -75,13 +75,13 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, ptr,
                       <4 x i32>, <4 x double>, i8) nounwind readonly
 
 define <4 x double> @test_x86_avx2_gather_d_pd_256(ptr %a1, <4 x i32> %idx, <4 x double> %mask) {
-; X32-LABEL: test_x86_avx2_gather_d_pd_256:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; X32-NEXT:    vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
-; X32-NEXT:    vmovapd %ymm2, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_x86_avx2_gather_d_pd_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
+; X86-NEXT:    vmovapd %ymm2, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_x86_avx2_gather_d_pd_256:
 ; X64:       # %bb.0:
@@ -95,14 +95,14 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(ptr %a1, <4 x i32> %idx, <4 x
 }
 
 define <2 x i64> @test_mm_i32gather_epi32(ptr%a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_i32gather_epi32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vpgatherdd %xmm2, (%eax,%xmm0,2), %xmm1
-; X32-NEXT:    vmovdqa %xmm1, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_i32gather_epi32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X86-NEXT:    vpgatherdd %xmm2, (%eax,%xmm0,2), %xmm1
+; X86-NEXT:    vmovdqa %xmm1, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_i32gather_epi32:
 ; X64:       # %bb.0:
@@ -121,14 +121,14 @@ define <2 x i64> @test_mm_i32gather_epi32(ptr%a0, <2 x i64> %a1) {
 declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, ptr, <4 x i32>, <4 x i32>, i8) nounwind readonly
 
 define <2 x double> @test_mm_i32gather_pd(ptr%a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_i32gather_pd:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; X32-NEXT:    vgatherdpd %xmm2, (%eax,%xmm0,2), %xmm1
-; X32-NEXT:    vmovapd %xmm1, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_i32gather_pd:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X86-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vgatherdpd %xmm2, (%eax,%xmm0,2), %xmm1
+; X86-NEXT:    vmovapd %xmm1, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_i32gather_pd:
 ; X64:       # %bb.0:
@@ -149,14 +149,14 @@ define <2 x double> @test_mm_i32gather_pd(ptr%a0, <2 x i64> %a1) {
 @x = dso_local global [1024 x float] zeroinitializer, align 16
 
 define <4 x float> @gather_global(<4 x i64>, ptr nocapture readnone) {
-; X32-LABEL: gather_global:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vgatherqps %xmm2, x(,%ymm0,4), %xmm1
-; X32-NEXT:    vmovaps %xmm1, %xmm0
-; X32-NEXT:    vzeroupper
-; X32-NEXT:    retl
+; X86-LABEL: gather_global:
+; X86:       # %bb.0:
+; X86-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-NEXT:    vgatherqps %xmm2, x(,%ymm0,4), %xmm1
+; X86-NEXT:    vmovaps %xmm1, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: gather_global:
 ; X64:       # %bb.0:


        


More information about the llvm-commits mailing list