[llvm] 61dcfaa - [X86] i64-mem-copy.ll - replace X32 checks with X86. NFC.

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 8 09:26:04 PST 2024


Author: Simon Pilgrim
Date: 2024-01-08T17:25:43Z
New Revision: 61dcfaa745e22b0e5330fc82ee4b7de4b6c99ab7

URL: https://github.com/llvm/llvm-project/commit/61dcfaa745e22b0e5330fc82ee4b7de4b6c99ab7
DIFF: https://github.com/llvm/llvm-project/commit/61dcfaa745e22b0e5330fc82ee4b7de4b6c99ab7.diff

LOG: [X86] i64-mem-copy.ll - replace X32 checks with X86. NFC.

We reserve the X32 check prefix for gnux32 (x32 ABI) triples only; regular 32-bit i386 triples should use X86.
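
For comparison, a hypothetical gnux32 RUN line (not part of this test) shows where the X32 prefix would still be appropriate:

; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mattr=sse2 | FileCheck %s --check-prefix=X32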

Also add nounwind to the test functions to remove .cfi directive noise from the checks.
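
As a minimal sketch (@needs_frame is a hypothetical function, not taken from this test): without nounwind, llc must assume the function may unwind, so i386 frame setup arrives with CFI annotations such as .cfi_def_cfa_offset 8 and .cfi_offset %ebp, -8 (visible in the old X32 checks in the diff below); adding the attribute suppresses them:

define void @needs_frame(<16 x i16> %x, ptr %p) nounwind {
  ; Passing a 256-bit vector on 32-bit x86 with sse2 triggers ebp-based
  ; frame setup with stack realignment, as in store_i64_from_vector256
  ; below; nounwind keeps the .cfi_* companions out of the output.
  %bc = bitcast <16 x i16> %x to <4 x i64>
  %e = extractelement <4 x i64> %bc, i32 2
  store i64 %e, ptr %p, align 8
  ret void
}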

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/i64-mem-copy.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/i64-mem-copy.ll b/llvm/test/CodeGen/X86/i64-mem-copy.ll
index 500c6c7876944a..4cdb079d439936 100644
--- a/llvm/test/CodeGen/X86/i64-mem-copy.ll
+++ b/llvm/test/CodeGen/X86/i64-mem-copy.ll
@@ -1,33 +1,33 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X32AVX
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X86AVX
 
 ; Use movq or movsd to load / store i64 values if sse2 is available.
 ; rdar://6659858
 
-define void @foo(ptr %x, ptr %y) {
+define void @foo(ptr %x, ptr %y) nounwind {
 ; X64-LABEL: foo:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movq (%rsi), %rax
 ; X64-NEXT:    movq %rax, (%rdi)
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: foo:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movsd %xmm0, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: foo:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movsd %xmm0, (%eax)
+; X86-NEXT:    retl
 ;
-; X32AVX-LABEL: foo:
-; X32AVX:       # %bb.0:
-; X32AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32AVX-NEXT:    vmovsd %xmm0, (%eax)
-; X32AVX-NEXT:    retl
+; X86AVX-LABEL: foo:
+; X86AVX:       # %bb.0:
+; X86AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86AVX-NEXT:    vmovsd %xmm0, (%eax)
+; X86AVX-NEXT:    retl
   %tmp1 = load i64, ptr %y, align 8
   store i64 %tmp1, ptr %x, align 8
   ret void
@@ -36,26 +36,26 @@ define void @foo(ptr %x, ptr %y) {
 ; Verify that a 64-bit chunk extracted from a vector is stored with a movq
 ; regardless of whether the system is 64-bit.
 
-define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, ptr %i) {
+define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, ptr %i) nounwind {
 ; X64-LABEL: store_i64_from_vector:
 ; X64:       # %bb.0:
 ; X64-NEXT:    paddw %xmm1, %xmm0
 ; X64-NEXT:    movq %xmm0, (%rdi)
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: store_i64_from_vector:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    paddw %xmm1, %xmm0
-; X32-NEXT:    movq %xmm0, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: store_i64_from_vector:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    paddw %xmm1, %xmm0
+; X86-NEXT:    movq %xmm0, (%eax)
+; X86-NEXT:    retl
 ;
-; X32AVX-LABEL: store_i64_from_vector:
-; X32AVX:       # %bb.0:
-; X32AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
-; X32AVX-NEXT:    vmovq %xmm0, (%eax)
-; X32AVX-NEXT:    retl
+; X86AVX-LABEL: store_i64_from_vector:
+; X86AVX:       # %bb.0:
+; X86AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; X86AVX-NEXT:    vmovq %xmm0, (%eax)
+; X86AVX-NEXT:    retl
   %z = add <8 x i16> %x, %y                          ; force execution domain
   %bc = bitcast <8 x i16> %z to <2 x i64>
   %vecext = extractelement <2 x i64> %bc, i32 0
@@ -63,39 +63,35 @@ define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, ptr %i) {
   ret void
 }
 
-define void @store_i64_from_vector256(<16 x i16> %x, <16 x i16> %y, ptr %i) {
+define void @store_i64_from_vector256(<16 x i16> %x, <16 x i16> %y, ptr %i) nounwind {
 ; X64-LABEL: store_i64_from_vector256:
 ; X64:       # %bb.0:
 ; X64-NEXT:    paddw %xmm3, %xmm1
 ; X64-NEXT:    movq %xmm1, (%rdi)
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: store_i64_from_vector256:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %ebp, -8
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    .cfi_def_cfa_register %ebp
-; X32-NEXT:    andl $-16, %esp
-; X32-NEXT:    subl $16, %esp
-; X32-NEXT:    movl 24(%ebp), %eax
-; X32-NEXT:    paddw 8(%ebp), %xmm1
-; X32-NEXT:    movq %xmm1, (%eax)
-; X32-NEXT:    movl %ebp, %esp
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    .cfi_def_cfa %esp, 4
-; X32-NEXT:    retl
+; X86-LABEL: store_i64_from_vector256:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-16, %esp
+; X86-NEXT:    subl $16, %esp
+; X86-NEXT:    movl 24(%ebp), %eax
+; X86-NEXT:    paddw 8(%ebp), %xmm1
+; X86-NEXT:    movq %xmm1, (%eax)
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; X32AVX-LABEL: store_i64_from_vector256:
-; X32AVX:       # %bb.0:
-; X32AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32AVX-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; X32AVX-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; X32AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
-; X32AVX-NEXT:    vmovq %xmm0, (%eax)
-; X32AVX-NEXT:    vzeroupper
-; X32AVX-NEXT:    retl
+; X86AVX-LABEL: store_i64_from_vector256:
+; X86AVX:       # %bb.0:
+; X86AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86AVX-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; X86AVX-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; X86AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; X86AVX-NEXT:    vmovq %xmm0, (%eax)
+; X86AVX-NEXT:    vzeroupper
+; X86AVX-NEXT:    retl
   %z = add <16 x i16> %x, %y                          ; force execution domain
   %bc = bitcast <16 x i16> %z to <4 x i64>
   %vecext = extractelement <4 x i64> %bc, i32 2
@@ -125,46 +121,46 @@ define void @PR23476(<5 x i64> %in, ptr %out, i32 %index) nounwind {
 ; X64-NEXT:    movq %rax, (%r9)
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: PR23476:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    andl $-16, %esp
-; X32-NEXT:    subl $80, %esp
-; X32-NEXT:    movl 52(%ebp), %eax
-; X32-NEXT:    andl $7, %eax
-; X32-NEXT:    movl 48(%ebp), %ecx
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movups 8(%ebp), %xmm1
-; X32-NEXT:    movups 24(%ebp), %xmm2
-; X32-NEXT:    movaps %xmm2, {{[0-9]+}}(%esp)
-; X32-NEXT:    movaps %xmm1, (%esp)
-; X32-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    movsd %xmm0, (%ecx)
-; X32-NEXT:    movl %ebp, %esp
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    retl
+; X86-LABEL: PR23476:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-16, %esp
+; X86-NEXT:    subl $80, %esp
+; X86-NEXT:    movl 52(%ebp), %eax
+; X86-NEXT:    andl $7, %eax
+; X86-NEXT:    movl 48(%ebp), %ecx
+; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movups 8(%ebp), %xmm1
+; X86-NEXT:    movups 24(%ebp), %xmm2
+; X86-NEXT:    movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NEXT:    movaps %xmm1, (%esp)
+; X86-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movsd %xmm0, (%ecx)
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; X32AVX-LABEL: PR23476:
-; X32AVX:       # %bb.0:
-; X32AVX-NEXT:    pushl %ebp
-; X32AVX-NEXT:    movl %esp, %ebp
-; X32AVX-NEXT:    andl $-32, %esp
-; X32AVX-NEXT:    subl $96, %esp
-; X32AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32AVX-NEXT:    movl 52(%ebp), %eax
-; X32AVX-NEXT:    andl $7, %eax
-; X32AVX-NEXT:    movl 48(%ebp), %ecx
-; X32AVX-NEXT:    vmovups 8(%ebp), %ymm1
-; X32AVX-NEXT:    vmovaps %ymm1, (%esp)
-; X32AVX-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32AVX-NEXT:    vmovsd %xmm0, (%ecx)
-; X32AVX-NEXT:    movl %ebp, %esp
-; X32AVX-NEXT:    popl %ebp
-; X32AVX-NEXT:    vzeroupper
-; X32AVX-NEXT:    retl
+; X86AVX-LABEL: PR23476:
+; X86AVX:       # %bb.0:
+; X86AVX-NEXT:    pushl %ebp
+; X86AVX-NEXT:    movl %esp, %ebp
+; X86AVX-NEXT:    andl $-32, %esp
+; X86AVX-NEXT:    subl $96, %esp
+; X86AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86AVX-NEXT:    movl 52(%ebp), %eax
+; X86AVX-NEXT:    andl $7, %eax
+; X86AVX-NEXT:    movl 48(%ebp), %ecx
+; X86AVX-NEXT:    vmovups 8(%ebp), %ymm1
+; X86AVX-NEXT:    vmovaps %ymm1, (%esp)
+; X86AVX-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X86AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86AVX-NEXT:    vmovsd %xmm0, (%ecx)
+; X86AVX-NEXT:    movl %ebp, %esp
+; X86AVX-NEXT:    popl %ebp
+; X86AVX-NEXT:    vzeroupper
+; X86AVX-NEXT:    retl
   %ext = extractelement <5 x i64> %in, i32 %index
   store i64 %ext, ptr %out, align 8
   ret void
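
For reference, the autogenerated assertions in tests like this are refreshed with utils/update_llc_test_checks.py; a typical invocation (assuming a local build/ directory) looks like:

llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc llvm/test/CodeGen/X86/i64-mem-copy.ll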