[llvm] 7b099b1 - [X86] 2011-10-19-widen_vselect.ll - replace X32 check prefix with X86. NFC.

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 17 04:42:53 PST 2020


Author: Simon Pilgrim
Date: 2020-11-17T12:39:46Z
New Revision: 7b099b1e1f66405bcc176e77a5b812f527b67be6

URL: https://github.com/llvm/llvm-project/commit/7b099b1e1f66405bcc176e77a5b812f527b67be6
DIFF: https://github.com/llvm/llvm-project/commit/7b099b1e1f66405bcc176e77a5b812f527b67be6.diff

LOG: [X86] 2011-10-19-widen_vselect.ll - replace X32 check prefix with X86. NFC.

We typically reserve the X32 check prefix for gnux32 triples, so plain 32-bit x86 targets should use the X86 prefix instead.
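As a rough sketch of that prefix convention (illustrative RUN lines only, not part of this commit; the gnux32 line assumes a hypothetical x32 test):

    ; X86 - plain 32-bit x86 target
    ; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mcpu=corei7 | FileCheck %s --check-prefix=X86
    ; X64 - 64-bit x86 target
    ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s --check-prefix=X64
    ; X32 - reserved for the gnux32 (x32 ILP32) ABI, i.e. 64-bit mode with 32-bit pointers
    ; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mcpu=corei7 | FileCheck %s --check-prefix=X32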

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll
index 292cfb1d7557..a7be89ffb67f 100644
--- a/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll
+++ b/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll
@@ -1,15 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mcpu=corei7   | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mcpu=corei7   | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s --check-prefix=X64
 
 ; Make sure that we don't crash when legalizing vselect and vsetcc and that
 ; we are able to generate vector blend instructions.
 
 define void @simple_widen(<2 x float> %a, <2 x float> %b) {
-; X32-LABEL: simple_widen:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movlps %xmm1, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: simple_widen:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movlps %xmm1, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: simple_widen:
 ; X64:       # %bb.0: # %entry
@@ -22,14 +22,14 @@ entry:
 }
 
 define void @complex_inreg_work(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
-; X32-LABEL: complex_inreg_work:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movaps %xmm0, %xmm3
-; X32-NEXT:    cmpordps %xmm2, %xmm2
-; X32-NEXT:    movaps %xmm2, %xmm0
-; X32-NEXT:    blendvps %xmm0, %xmm3, %xmm1
-; X32-NEXT:    movlps %xmm1, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: complex_inreg_work:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movaps %xmm0, %xmm3
+; X86-NEXT:    cmpordps %xmm2, %xmm2
+; X86-NEXT:    movaps %xmm2, %xmm0
+; X86-NEXT:    blendvps %xmm0, %xmm3, %xmm1
+; X86-NEXT:    movlps %xmm1, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: complex_inreg_work:
 ; X64:       # %bb.0: # %entry
@@ -47,11 +47,11 @@ entry:
 }
 
 define void @zero_test() {
-; X32-LABEL: zero_test:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    xorps %xmm0, %xmm0
-; X32-NEXT:    movlps %xmm0, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: zero_test:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    xorps %xmm0, %xmm0
+; X86-NEXT:    movlps %xmm0, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: zero_test:
 ; X64:       # %bb.0: # %entry
@@ -65,27 +65,27 @@ entry:
 }
 
 define void @full_test() {
-; X32-LABEL: full_test:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    subl $60, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 64
-; X32-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X32-NEXT:    cvttps2dq %xmm2, %xmm0
-; X32-NEXT:    cvtdq2ps %xmm0, %xmm1
-; X32-NEXT:    xorps %xmm0, %xmm0
-; X32-NEXT:    cmpltps %xmm2, %xmm0
-; X32-NEXT:    movaps {{.*#+}} xmm3 = <1.0E+0,1.0E+0,u,u>
-; X32-NEXT:    addps %xmm1, %xmm3
-; X32-NEXT:    movaps %xmm1, %xmm4
-; X32-NEXT:    blendvps %xmm0, %xmm3, %xmm4
-; X32-NEXT:    cmpeqps %xmm2, %xmm1
-; X32-NEXT:    movaps %xmm1, %xmm0
-; X32-NEXT:    blendvps %xmm0, %xmm2, %xmm4
-; X32-NEXT:    movlps %xmm4, {{[0-9]+}}(%esp)
-; X32-NEXT:    movlps %xmm4, {{[0-9]+}}(%esp)
-; X32-NEXT:    addl $60, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
+; X86-LABEL: full_test:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    subl $60, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 64
+; X86-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; X86-NEXT:    cvttps2dq %xmm2, %xmm0
+; X86-NEXT:    cvtdq2ps %xmm0, %xmm1
+; X86-NEXT:    xorps %xmm0, %xmm0
+; X86-NEXT:    cmpltps %xmm2, %xmm0
+; X86-NEXT:    movaps {{.*#+}} xmm3 = <1.0E+0,1.0E+0,u,u>
+; X86-NEXT:    addps %xmm1, %xmm3
+; X86-NEXT:    movaps %xmm1, %xmm4
+; X86-NEXT:    blendvps %xmm0, %xmm3, %xmm4
+; X86-NEXT:    cmpeqps %xmm2, %xmm1
+; X86-NEXT:    movaps %xmm1, %xmm0
+; X86-NEXT:    blendvps %xmm0, %xmm2, %xmm4
+; X86-NEXT:    movlps %xmm4, {{[0-9]+}}(%esp)
+; X86-NEXT:    movlps %xmm4, {{[0-9]+}}(%esp)
+; X86-NEXT:    addl $60, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: full_test:
 ; X64:       # %bb.0: # %entry


        

