[llvm] 93f967e - [X86] vec_ss_load_fold.ll - use X86 check prefix instead of X32

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Jan 29 08:48:49 PST 2023


Author: Simon Pilgrim
Date: 2023-01-29T16:47:19Z
New Revision: 93f967e2005d21201add0b4fc6af6a9f022600c4

URL: https://github.com/llvm/llvm-project/commit/93f967e2005d21201add0b4fc6af6a9f022600c4
DIFF: https://github.com/llvm/llvm-project/commit/93f967e2005d21201add0b4fc6af6a9f022600c4.diff

LOG: [X86] vec_ss_load_fold.ll - use X86 check prefix instead of X32

We try to reserve the X32 check prefix for tests using gnux32 triples

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/vec_ss_load_fold.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vec_ss_load_fold.ll b/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
index 26893e8d11f0..e4304f2cc214 100644
--- a/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
+++ b/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
@@ -1,24 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin9 -mattr=+avx | FileCheck %s --check-prefix=X32_AVX --check-prefix=X32_AVX1
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin9 -mattr=+avx | FileCheck %s --check-prefix=X64_AVX --check-prefix=X64_AVX1
-; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin9 -mattr=+avx512f | FileCheck %s --check-prefix=X32_AVX --check-prefix=X32_AVX512
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin9 -mattr=+avx512f | FileCheck %s --check-prefix=X64_AVX --check-prefix=X64_AVX512
+; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin9 -mattr=+avx | FileCheck %s --check-prefixes=X86_AVX,X86_AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin9 -mattr=+avx | FileCheck %s --check-prefixes=X64_AVX,X64_AVX1
+; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin9 -mattr=+avx512f | FileCheck %s --check-prefixes=X86_AVX,X86_AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin9 -mattr=+avx512f | FileCheck %s --check-prefixes=X64_AVX,X64_AVX512
 
 define i16 @test1(float %f) nounwind {
-; X32-LABEL: test1:
-; X32:       ## %bb.0:
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    addss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    xorps %xmm1, %xmm1
-; X32-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; X32-NEXT:    minss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    maxss %xmm1, %xmm0
-; X32-NEXT:    cvttss2si %xmm0, %eax
-; X32-NEXT:    ## kill: def $ax killed $ax killed $eax
-; X32-NEXT:    retl
+; X86-LABEL: test1:
+; X86:       ## %bb.0:
+; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    addss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    xorps %xmm1, %xmm1
+; X86-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X86-NEXT:    minss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    maxss %xmm1, %xmm0
+; X86-NEXT:    cvttss2si %xmm0, %eax
+; X86-NEXT:    ## kill: def $ax killed $ax killed $eax
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test1:
 ; X64:       ## %bb.0:
@@ -32,18 +32,18 @@ define i16 @test1(float %f) nounwind {
 ; X64-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
 ;
-; X32_AVX1-LABEL: test1:
-; X32_AVX1:       ## %bb.0:
-; X32_AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT:    vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX1-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32_AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; X32_AVX1-NEXT:    vminss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX1-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
-; X32_AVX1-NEXT:    vcvttss2si %xmm0, %eax
-; X32_AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
-; X32_AVX1-NEXT:    retl
+; X86_AVX1-LABEL: test1:
+; X86_AVX1:       ## %bb.0:
+; X86_AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86_AVX1-NEXT:    vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86_AVX1-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86_AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86_AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X86_AVX1-NEXT:    vminss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86_AVX1-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
+; X86_AVX1-NEXT:    vcvttss2si %xmm0, %eax
+; X86_AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
+; X86_AVX1-NEXT:    retl
 ;
 ; X64_AVX1-LABEL: test1:
 ; X64_AVX1:       ## %bb.0:
@@ -57,19 +57,19 @@ define i16 @test1(float %f) nounwind {
 ; X64_AVX1-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64_AVX1-NEXT:    retq
 ;
-; X32_AVX512-LABEL: test1:
-; X32_AVX512:       ## %bb.0:
-; X32_AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX512-NEXT:    vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX512-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32_AVX512-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; X32_AVX512-NEXT:    vminss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32_AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
-; X32_AVX512-NEXT:    vcvttss2si %xmm0, %eax
-; X32_AVX512-NEXT:    ## kill: def $ax killed $ax killed $eax
-; X32_AVX512-NEXT:    retl
+; X86_AVX512-LABEL: test1:
+; X86_AVX512:       ## %bb.0:
+; X86_AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86_AVX512-NEXT:    vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86_AVX512-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86_AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86_AVX512-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X86_AVX512-NEXT:    vminss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86_AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86_AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
+; X86_AVX512-NEXT:    vcvttss2si %xmm0, %eax
+; X86_AVX512-NEXT:    ## kill: def $ax killed $ax killed $eax
+; X86_AVX512-NEXT:    retl
 ;
 ; X64_AVX512-LABEL: test1:
 ; X64_AVX512:       ## %bb.0:
@@ -97,17 +97,17 @@ define i16 @test1(float %f) nounwind {
 }
 
 define i16 @test2(float %f) nounwind {
-; X32-LABEL: test2:
-; X32:       ## %bb.0:
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    addss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    minss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT:    xorps %xmm1, %xmm1
-; X32-NEXT:    maxss %xmm1, %xmm0
-; X32-NEXT:    cvttss2si %xmm0, %eax
-; X32-NEXT:    ## kill: def $ax killed $ax killed $eax
-; X32-NEXT:    retl
+; X86-LABEL: test2:
+; X86:       ## %bb.0:
+; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    addss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    minss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    xorps %xmm1, %xmm1
+; X86-NEXT:    maxss %xmm1, %xmm0
+; X86-NEXT:    cvttss2si %xmm0, %eax
+; X86-NEXT:    ## kill: def $ax killed $ax killed $eax
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test2:
 ; X64:       ## %bb.0:
@@ -120,17 +120,17 @@ define i16 @test2(float %f) nounwind {
 ; X64-NEXT:    ## kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
 ;
-; X32_AVX-LABEL: test2:
-; X32_AVX:       ## %bb.0:
-; X32_AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX-NEXT:    vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX-NEXT:    vminss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32_AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
-; X32_AVX-NEXT:    vcvttss2si %xmm0, %eax
-; X32_AVX-NEXT:    ## kill: def $ax killed $ax killed $eax
-; X32_AVX-NEXT:    retl
+; X86_AVX-LABEL: test2:
+; X86_AVX:       ## %bb.0:
+; X86_AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86_AVX-NEXT:    vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86_AVX-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86_AVX-NEXT:    vminss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86_AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
+; X86_AVX-NEXT:    vcvttss2si %xmm0, %eax
+; X86_AVX-NEXT:    ## kill: def $ax killed $ax killed $eax
+; X86_AVX-NEXT:    retl
 ;
 ; X64_AVX-LABEL: test2:
 ; X64_AVX:       ## %bb.0:
@@ -167,22 +167,22 @@ declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32)
 declare <4 x float> @f()
 
 define <4 x float> @test3(<4 x float> %A, ptr%b, i32 %C) nounwind {
-; X32-LABEL: test3:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    roundss $4, (%eax), %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: test3:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    roundss $4, (%eax), %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test3:
 ; X64:       ## %bb.0:
 ; X64-NEXT:    roundss $4, (%rdi), %xmm0
 ; X64-NEXT:    retq
 ;
-; X32_AVX-LABEL: test3:
-; X32_AVX:       ## %bb.0:
-; X32_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32_AVX-NEXT:    vroundss $4, (%eax), %xmm0, %xmm0
-; X32_AVX-NEXT:    retl
+; X86_AVX-LABEL: test3:
+; X86_AVX:       ## %bb.0:
+; X86_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86_AVX-NEXT:    vroundss $4, (%eax), %xmm0, %xmm0
+; X86_AVX-NEXT:    retl
 ;
 ; X64_AVX-LABEL: test3:
 ; X64_AVX:       ## %bb.0:
@@ -195,16 +195,16 @@ define <4 x float> @test3(<4 x float> %A, ptr%b, i32 %C) nounwind {
 }
 
 define <4 x float> @test4(<4 x float> %A, ptr%b, i32 %C) nounwind {
-; X32-LABEL: test4:
-; X32:       ## %bb.0:
-; X32-NEXT:    subl $28, %esp
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT:    movaps %xmm0, (%esp) ## 16-byte Spill
-; X32-NEXT:    calll _f
-; X32-NEXT:    roundss $4, (%esp), %xmm0 ## 16-byte Folded Reload
-; X32-NEXT:    addl $28, %esp
-; X32-NEXT:    retl
+; X86-LABEL: test4:
+; X86:       ## %bb.0:
+; X86-NEXT:    subl $28, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    movaps %xmm0, (%esp) ## 16-byte Spill
+; X86-NEXT:    calll _f
+; X86-NEXT:    roundss $4, (%esp), %xmm0 ## 16-byte Folded Reload
+; X86-NEXT:    addl $28, %esp
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test4:
 ; X64:       ## %bb.0:
@@ -216,16 +216,16 @@ define <4 x float> @test4(<4 x float> %A, ptr%b, i32 %C) nounwind {
 ; X64-NEXT:    addq $24, %rsp
 ; X64-NEXT:    retq
 ;
-; X32_AVX-LABEL: test4:
-; X32_AVX:       ## %bb.0:
-; X32_AVX-NEXT:    subl $28, %esp
-; X32_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32_AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX-NEXT:    vmovaps %xmm0, (%esp) ## 16-byte Spill
-; X32_AVX-NEXT:    calll _f
-; X32_AVX-NEXT:    vroundss $4, (%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
-; X32_AVX-NEXT:    addl $28, %esp
-; X32_AVX-NEXT:    retl
+; X86_AVX-LABEL: test4:
+; X86_AVX:       ## %bb.0:
+; X86_AVX-NEXT:    subl $28, %esp
+; X86_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86_AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86_AVX-NEXT:    vmovaps %xmm0, (%esp) ## 16-byte Spill
+; X86_AVX-NEXT:    calll _f
+; X86_AVX-NEXT:    vroundss $4, (%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; X86_AVX-NEXT:    addl $28, %esp
+; X86_AVX-NEXT:    retl
 ;
 ; X64_AVX-LABEL: test4:
 ; X64_AVX:       ## %bb.0:
@@ -245,20 +245,20 @@ define <4 x float> @test4(<4 x float> %A, ptr%b, i32 %C) nounwind {
 
 ; PR13576
 define  <2 x double> @test5() nounwind uwtable readnone noinline {
-; X32-LABEL: test5:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movaps {{.*#+}} xmm0 = [1.28E+2,1.23321E+2]
-; X32-NEXT:    retl
+; X86-LABEL: test5:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    movaps {{.*#+}} xmm0 = [1.28E+2,1.23321E+2]
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test5:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movaps {{.*#+}} xmm0 = [1.28E+2,1.23321E+2]
 ; X64-NEXT:    retq
 ;
-; X32_AVX-LABEL: test5:
-; X32_AVX:       ## %bb.0: ## %entry
-; X32_AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [1.28E+2,1.23321E+2]
-; X32_AVX-NEXT:    retl
+; X86_AVX-LABEL: test5:
+; X86_AVX:       ## %bb.0: ## %entry
+; X86_AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [1.28E+2,1.23321E+2]
+; X86_AVX-NEXT:    retl
 ;
 ; X64_AVX-LABEL: test5:
 ; X64_AVX:       ## %bb.0: ## %entry
@@ -272,22 +272,22 @@ entry:
 declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnone
 
 define <4 x float> @minss_fold(ptr %x, <4 x float> %y) {
-; X32-LABEL: minss_fold:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    minss (%eax), %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: minss_fold:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    minss (%eax), %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: minss_fold:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    minss (%rdi), %xmm0
 ; X64-NEXT:    retq
 ;
-; X32_AVX-LABEL: minss_fold:
-; X32_AVX:       ## %bb.0: ## %entry
-; X32_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32_AVX-NEXT:    vminss (%eax), %xmm0, %xmm0
-; X32_AVX-NEXT:    retl
+; X86_AVX-LABEL: minss_fold:
+; X86_AVX:       ## %bb.0: ## %entry
+; X86_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86_AVX-NEXT:    vminss (%eax), %xmm0, %xmm0
+; X86_AVX-NEXT:    retl
 ;
 ; X64_AVX-LABEL: minss_fold:
 ; X64_AVX:       ## %bb.0: ## %entry
@@ -304,22 +304,22 @@ entry:
 }
 
 define <4 x float> @maxss_fold(ptr %x, <4 x float> %y) {
-; X32-LABEL: maxss_fold:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    maxss (%eax), %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: maxss_fold:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    maxss (%eax), %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: maxss_fold:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    maxss (%rdi), %xmm0
 ; X64-NEXT:    retq
 ;
-; X32_AVX-LABEL: maxss_fold:
-; X32_AVX:       ## %bb.0: ## %entry
-; X32_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32_AVX-NEXT:    vmaxss (%eax), %xmm0, %xmm0
-; X32_AVX-NEXT:    retl
+; X86_AVX-LABEL: maxss_fold:
+; X86_AVX:       ## %bb.0: ## %entry
+; X86_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86_AVX-NEXT:    vmaxss (%eax), %xmm0, %xmm0
+; X86_AVX-NEXT:    retl
 ;
 ; X64_AVX-LABEL: maxss_fold:
 ; X64_AVX:       ## %bb.0: ## %entry
@@ -336,22 +336,22 @@ entry:
 }
 
 define <4 x float> @cmpss_fold(ptr %x, <4 x float> %y) {
-; X32-LABEL: cmpss_fold:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    cmpeqss (%eax), %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: cmpss_fold:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpeqss (%eax), %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: cmpss_fold:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    cmpeqss (%rdi), %xmm0
 ; X64-NEXT:    retq
 ;
-; X32_AVX-LABEL: cmpss_fold:
-; X32_AVX:       ## %bb.0: ## %entry
-; X32_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32_AVX-NEXT:    vcmpeqss (%eax), %xmm0, %xmm0
-; X32_AVX-NEXT:    retl
+; X86_AVX-LABEL: cmpss_fold:
+; X86_AVX:       ## %bb.0: ## %entry
+; X86_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86_AVX-NEXT:    vcmpeqss (%eax), %xmm0, %xmm0
+; X86_AVX-NEXT:    retl
 ;
 ; X64_AVX-LABEL: cmpss_fold:
 ; X64_AVX:       ## %bb.0: ## %entry
@@ -370,15 +370,15 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind
 
 
 define <4 x float> @double_fold(ptr %x, <4 x float> %y) {
-; X32-LABEL: double_fold:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT:    movaps %xmm0, %xmm2
-; X32-NEXT:    minss %xmm1, %xmm2
-; X32-NEXT:    maxss %xmm1, %xmm0
-; X32-NEXT:    addps %xmm2, %xmm0
-; X32-NEXT:    retl
+; X86-LABEL: double_fold:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    movaps %xmm0, %xmm2
+; X86-NEXT:    minss %xmm1, %xmm2
+; X86-NEXT:    maxss %xmm1, %xmm0
+; X86-NEXT:    addps %xmm2, %xmm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: double_fold:
 ; X64:       ## %bb.0: ## %entry
@@ -389,14 +389,14 @@ define <4 x float> @double_fold(ptr %x, <4 x float> %y) {
 ; X64-NEXT:    addps %xmm2, %xmm0
 ; X64-NEXT:    retq
 ;
-; X32_AVX-LABEL: double_fold:
-; X32_AVX:       ## %bb.0: ## %entry
-; X32_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32_AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32_AVX-NEXT:    vminss %xmm1, %xmm0, %xmm2
-; X32_AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
-; X32_AVX-NEXT:    vaddps %xmm0, %xmm2, %xmm0
-; X32_AVX-NEXT:    retl
+; X86_AVX-LABEL: double_fold:
+; X86_AVX:       ## %bb.0: ## %entry
+; X86_AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86_AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86_AVX-NEXT:    vminss %xmm1, %xmm0, %xmm2
+; X86_AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
+; X86_AVX-NEXT:    vaddps %xmm0, %xmm2, %xmm0
+; X86_AVX-NEXT:    retl
 ;
 ; X64_AVX-LABEL: double_fold:
 ; X64_AVX:       ## %bb.0: ## %entry


        


More information about the llvm-commits mailing list