[llvm] 06f5b95 - [X86] pmovsx-inreg.ll - replace X32 check prefixes with X86

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Mon Jan 29 06:23:32 PST 2024


Author: Simon Pilgrim
Date: 2024-01-29T14:23:08Z
New Revision: 06f5b956a0ba3bb1cad7d94707398a97750d8b37

URL: https://github.com/llvm/llvm-project/commit/06f5b956a0ba3bb1cad7d94707398a97750d8b37
DIFF: https://github.com/llvm/llvm-project/commit/06f5b956a0ba3bb1cad7d94707398a97750d8b37.diff

LOG: [X86] pmovsx-inreg.ll - replace X32 check prefixes with X86

We try to only use the X32 check prefix for gnux32 triple tests; plain 32-bit i686 runs should use X86 instead.
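
For readers unfamiliar with the convention, a minimal hedged sketch (hypothetical RUN
lines, not taken from this test): X86 covers 32-bit i686 triples, X64 covers 64-bit
x86_64 triples, and X32 is reserved for the x32 ILP32 ABI:

    ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu   | FileCheck %s --check-prefix=X86
    ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64
    ; RUN: llc < %s -mtriple=x86_64-linux-gnux32      | FileCheck %s --check-prefix=X32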

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/pmovsx-inreg.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/pmovsx-inreg.ll b/llvm/test/CodeGen/X86/pmovsx-inreg.ll
index 76b7fae045eca3..a39ea60331a5e5 100644
--- a/llvm/test/CodeGen/X86/pmovsx-inreg.ll
+++ b/llvm/test/CodeGen/X86/pmovsx-inreg.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknwon -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknwon -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknwon -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=i686-unknwon -mattr=+avx2 | FileCheck %s --check-prefix=X32-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknwon -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknwon -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknwon -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=i686-unknwon -mattr=+avx2 | FileCheck %s --check-prefixes=X86-AVX2
 
 ; PR14887
 ; These tests inject a store into the chain to test the inreg versions of pmovsx
@@ -24,15 +24,15 @@ define void @test1(ptr %in, ptr %out) nounwind {
 ; AVX-NEXT:    vmovdqu %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test1:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxbq (%ecx), %xmm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test1:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxbq (%ecx), %xmm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <2 x i8>, ptr %in, align 1
   %sext = sext <2 x i8> %wide.load35 to <2 x i64>
   store <2 x i64> zeroinitializer, ptr undef, align 8
@@ -71,16 +71,16 @@ define void @test2(ptr %in, ptr %out) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test2:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxbq (%ecx), %ymm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT:    vzeroupper
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test2:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxbq (%ecx), %ymm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT:    vzeroupper
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <4 x i8>, ptr %in, align 1
   %sext = sext <4 x i8> %wide.load35 to <4 x i64>
   store <4 x i64> zeroinitializer, ptr undef, align 8
@@ -105,15 +105,15 @@ define void @test3(ptr %in, ptr %out) nounwind {
 ; AVX-NEXT:    vmovdqu %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test3:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxbd (%ecx), %xmm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test3:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxbd (%ecx), %xmm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <4 x i8>, ptr %in, align 1
   %sext = sext <4 x i8> %wide.load35 to <4 x i32>
   store <4 x i32> zeroinitializer, ptr undef, align 8
@@ -152,16 +152,16 @@ define void @test4(ptr %in, ptr %out) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test4:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxbd (%ecx), %ymm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT:    vzeroupper
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test4:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxbd (%ecx), %ymm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT:    vzeroupper
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <8 x i8>, ptr %in, align 1
   %sext = sext <8 x i8> %wide.load35 to <8 x i32>
   store <8 x i32> zeroinitializer, ptr undef, align 8
@@ -186,15 +186,15 @@ define void @test5(ptr %in, ptr %out) nounwind {
 ; AVX-NEXT:    vmovdqu %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test5:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxbw (%ecx), %xmm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test5:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxbw (%ecx), %xmm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <8 x i8>, ptr %in, align 1
   %sext = sext <8 x i8> %wide.load35 to <8 x i16>
   store <8 x i16> zeroinitializer, ptr undef, align 8
@@ -233,16 +233,16 @@ define void @test6(ptr %in, ptr %out) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test6:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxbw (%ecx), %ymm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT:    vzeroupper
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test6:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxbw (%ecx), %ymm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT:    vzeroupper
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <16 x i8>, ptr %in, align 1
   %sext = sext <16 x i8> %wide.load35 to <16 x i16>
   store <16 x i16> zeroinitializer, ptr undef, align 8
@@ -267,15 +267,15 @@ define void @test7(ptr %in, ptr %out) nounwind {
 ; AVX-NEXT:    vmovdqu %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test7:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxwq (%ecx), %xmm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test7:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxwq (%ecx), %xmm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <2 x i16>, ptr %in, align 1
   %sext = sext <2 x i16> %wide.load35 to <2 x i64>
   store <2 x i64> zeroinitializer, ptr undef, align 8
@@ -314,16 +314,16 @@ define void @test8(ptr %in, ptr %out) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test8:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxwq (%ecx), %ymm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT:    vzeroupper
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test8:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxwq (%ecx), %ymm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT:    vzeroupper
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <4 x i16>, ptr %in, align 1
   %sext = sext <4 x i16> %wide.load35 to <4 x i64>
   store <4 x i64> zeroinitializer, ptr undef, align 8
@@ -348,15 +348,15 @@ define void @test9(ptr %in, ptr %out) nounwind {
 ; AVX-NEXT:    vmovdqu %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test9:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxwd (%ecx), %xmm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test9:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxwd (%ecx), %xmm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <4 x i16>, ptr %in, align 1
   %sext = sext <4 x i16> %wide.load35 to <4 x i32>
   store <4 x i32> zeroinitializer, ptr undef, align 8
@@ -395,16 +395,16 @@ define void @test10(ptr %in, ptr %out) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test10:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxwd (%ecx), %ymm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT:    vzeroupper
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test10:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxwd (%ecx), %ymm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT:    vzeroupper
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <8 x i16>, ptr %in, align 1
   %sext = sext <8 x i16> %wide.load35 to <8 x i32>
   store <8 x i32> zeroinitializer, ptr undef, align 8
@@ -429,15 +429,15 @@ define void @test11(ptr %in, ptr %out) nounwind {
 ; AVX-NEXT:    vmovdqu %xmm0, (%rsi)
 ; AVX-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test11:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxdq (%ecx), %xmm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test11:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxdq (%ecx), %xmm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <2 x i32>, ptr %in, align 1
   %sext = sext <2 x i32> %wide.load35 to <2 x i64>
   store <2 x i64> zeroinitializer, ptr undef, align 8
@@ -476,16 +476,16 @@ define void @test12(ptr %in, ptr %out) nounwind {
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; X32-AVX2-LABEL: test12:
-; X32-AVX2:       # %bb.0:
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT:    vpmovsxdq (%ecx), %ymm0
-; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT:    vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT:    vzeroupper
-; X32-AVX2-NEXT:    retl
+; X86-AVX2-LABEL: test12:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vpmovsxdq (%ecx), %ymm0
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT:    vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT:    vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT:    vzeroupper
+; X86-AVX2-NEXT:    retl
   %wide.load35 = load <4 x i32>, ptr %in, align 1
   %sext = sext <4 x i32> %wide.load35 to <4 x i64>
   store <4 x i64> zeroinitializer, ptr undef, align 8
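
For context on the PR14887 note in the test header: per the test's own comment, the
store of zeroinitializer to an undef pointer injects an extra store into the load's
chain, steering instruction selection toward the load-folding (inreg) pmovsx patterns
rather than a plain vector sign-extend. A minimal sketch of the shape each test follows
(hypothetical function name; the committed tests use test1..test12):

    define void @pmovsx_inreg_sketch(ptr %in, ptr %out) nounwind {
      %wide.load = load <2 x i8>, ptr %in, align 1        ; narrow vector load
      %sext = sext <2 x i8> %wide.load to <2 x i64>       ; widening sign-extend -> vpmovsxbq
      store <2 x i64> zeroinitializer, ptr undef, align 8 ; injected store on the chain
      store <2 x i64> %sext, ptr %out, align 8
      ret void
    }

After a prefix rename like this one, the CHECK lines would typically be regenerated and
re-verified with something like the following (assuming a built llc and llvm-lit on PATH,
run from an llvm-project checkout):

    python3 llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/pmovsx-inreg.ll
    llvm-lit -v llvm/test/CodeGen/X86/pmovsx-inreg.ll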