[llvm] 03cde3a - [X86] Regenerate known-signbits-vector.ll tests.
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 4 07:13:38 PST 2019
Author: Simon Pilgrim
Date: 2019-11-04T15:12:01Z
New Revision: 03cde3a7ccd2025baa497cbcf6e825862429f1bd
URL: https://github.com/llvm/llvm-project/commit/03cde3a7ccd2025baa497cbcf6e825862429f1bd
DIFF: https://github.com/llvm/llvm-project/commit/03cde3a7ccd2025baa497cbcf6e825862429f1bd.diff
LOG: [X86] Regenerate known-signbits-vector.ll tests.
Use X86 instead of X32 and add a common CHECK prefix
Added:
Modified:
llvm/test/CodeGen/X86/known-signbits-vector.ll
Removed:
################################################################################
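For context: tests like this carry the NOTE header visible below and are regenerated from fresh llc output rather than edited by hand, so every prefix rename in this patch was produced in one pass. A minimal sketch of the workflow, assuming an llvm-project checkout with a built llc on PATH (otherwise point the script at it with --llc-binary):

    python llvm/utils/update_llc_test_checks.py \
        llvm/test/CodeGen/X86/known-signbits-vector.ll

The script reads the RUN lines at the top of the test and rewrites each function's CHECK block to match current codegen. Renaming X32 to X86 presumably also avoids confusion with the unrelated x32 ABI (ILP32 on x86-64), though the log does not say so.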
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index 846d97d55073..57d7f66a2531 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64
define <2 x double> @signbits_sext_v2i64_sitofp_v2f64(i32 %a0, i32 %a1) nounwind {
-; X32-LABEL: signbits_sext_v2i64_sitofp_v2f64:
-; X32: # %bb.0:
-; X32-NEXT: vcvtdq2pd {{[0-9]+}}(%esp), %xmm0
-; X32-NEXT: retl
+; X86-LABEL: signbits_sext_v2i64_sitofp_v2f64:
+; X86: # %bb.0:
+; X86-NEXT: vcvtdq2pd {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: signbits_sext_v2i64_sitofp_v2f64:
; X64: # %bb.0:
@@ -23,16 +23,16 @@ define <2 x double> @signbits_sext_v2i64_sitofp_v2f64(i32 %a0, i32 %a1) nounwind
}
define <4 x float> @signbits_sext_v4i64_sitofp_v4f32(i8 signext %a0, i16 signext %a1, i32 %a2, i32 %a3) nounwind {
-; X32-LABEL: signbits_sext_v4i64_sitofp_v4f32:
-; X32: # %bb.0:
-; X32-NEXT: movswl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movsbl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: vmovd %ecx, %xmm0
-; X32-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; X32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; X32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-NEXT: retl
+; X86-LABEL: signbits_sext_v4i64_sitofp_v4f32:
+; X86: # %bb.0:
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movsbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovd %ecx, %xmm0
+; X86-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; X86-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: signbits_sext_v4i64_sitofp_v4f32:
; X64: # %bb.0:
@@ -55,15 +55,15 @@ define <4 x float> @signbits_sext_v4i64_sitofp_v4f32(i8 signext %a0, i16 signext
}
define float @signbits_ashr_extract_sitofp_0(<2 x i64> %a0) nounwind {
-; X32-LABEL: signbits_ashr_extract_sitofp_0:
-; X32: # %bb.0:
-; X32-NEXT: pushl %eax
-; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-NEXT: vmovss %xmm0, (%esp)
-; X32-NEXT: flds (%esp)
-; X32-NEXT: popl %eax
-; X32-NEXT: retl
+; X86-LABEL: signbits_ashr_extract_sitofp_0:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X86-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
;
; X64-LABEL: signbits_ashr_extract_sitofp_0:
; X64: # %bb.0:
@@ -77,15 +77,15 @@ define float @signbits_ashr_extract_sitofp_0(<2 x i64> %a0) nounwind {
}
define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
-; X32-LABEL: signbits_ashr_extract_sitofp_1:
-; X32: # %bb.0:
-; X32-NEXT: pushl %eax
-; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-NEXT: vmovss %xmm0, (%esp)
-; X32-NEXT: flds (%esp)
-; X32-NEXT: popl %eax
-; X32-NEXT: retl
+; X86-LABEL: signbits_ashr_extract_sitofp_1:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X86-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
;
; X64-LABEL: signbits_ashr_extract_sitofp_1:
; X64: # %bb.0:
@@ -99,17 +99,17 @@ define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
}
define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
-; X32-LABEL: signbits_ashr_shl_extract_sitofp:
-; X32: # %bb.0:
-; X32-NEXT: pushl %eax
-; X32-NEXT: vpsrad $29, %xmm0, %xmm0
-; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-NEXT: vpsllq $20, %xmm0, %xmm0
-; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-NEXT: vmovss %xmm0, (%esp)
-; X32-NEXT: flds (%esp)
-; X32-NEXT: popl %eax
-; X32-NEXT: retl
+; X86-LABEL: signbits_ashr_shl_extract_sitofp:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vpsrad $29, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X86-NEXT: vpsllq $20, %xmm0, %xmm0
+; X86-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
;
; X64-LABEL: signbits_ashr_shl_extract_sitofp:
; X64: # %bb.0:
@@ -126,21 +126,21 @@ define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
}
define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwind {
-; X32-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
-; X32: # %bb.0:
-; X32-NEXT: pushl %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: sarl $30, %ecx
-; X32-NEXT: shll $2, %eax
-; X32-NEXT: vmovd %eax, %xmm0
-; X32-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; X32-NEXT: vpsrlq $3, %xmm0, %xmm0
-; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-NEXT: vmovss %xmm0, (%esp)
-; X32-NEXT: flds (%esp)
-; X32-NEXT: popl %eax
-; X32-NEXT: retl
+; X86-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: sarl $30, %ecx
+; X86-NEXT: shll $2, %eax
+; X86-NEXT: vmovd %eax, %xmm0
+; X86-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
+; X86-NEXT: vpsrlq $3, %xmm0, %xmm0
+; X86-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
;
; X64-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
; X64: # %bb.0:
@@ -159,31 +159,18 @@ define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwin
}
define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1) nounwind {
-; X32-LABEL: signbits_sext_shuffle_sitofp:
-; X32: # %bb.0:
-; X32-NEXT: vpmovsxdq %xmm0, %xmm1
-; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-NEXT: vpmovsxdq %xmm0, %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; X32-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X32-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X32-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; X32-NEXT: vcvtdq2pd %xmm0, %ymm0
-; X32-NEXT: retl
-;
-; X64-LABEL: signbits_sext_shuffle_sitofp:
-; X64: # %bb.0:
-; X64-NEXT: vpmovsxdq %xmm0, %xmm1
-; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X64-NEXT: vpmovsxdq %xmm0, %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; X64-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X64-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; X64-NEXT: vcvtdq2pd %xmm0, %ymm0
-; X64-NEXT: retq
+; CHECK-LABEL: signbits_sext_shuffle_sitofp:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpmovsxdq %xmm0, %xmm1
+; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; CHECK-NEXT: vpmovsxdq %xmm0, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
+; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; CHECK-NEXT: vcvtdq2pd %xmm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
%1 = sext <4 x i32> %a0 to <4 x i64>
%2 = shufflevector <4 x i64> %1, <4 x i64>%a1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
%3 = sitofp <4 x i64> %2 to <4 x double>
@@ -192,11 +179,11 @@ define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1)
; TODO: Fix vpshufd+vpsrlq -> vpshufd/vpermilps
define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4 x i64> %a1) nounwind {
-; X32-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
-; X32: # %bb.0:
-; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; X32-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X32-NEXT: retl
+; X86-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
+; X86: # %bb.0:
+; X86-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X86-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
; X64: # %bb.0:
@@ -214,18 +201,18 @@ define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4
}
define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2 x i64> %a1, i32 %a2) nounwind {
-; X32-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
-; X32: # %bb.0:
-; X32-NEXT: pushl %eax
-; X32-NEXT: vpsrad $29, %xmm0, %xmm0
-; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vpand %xmm1, %xmm0, %xmm0
-; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-NEXT: vmovss %xmm0, (%esp)
-; X32-NEXT: flds (%esp)
-; X32-NEXT: popl %eax
-; X32-NEXT: retl
+; X86-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vpsrad $29, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X86-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
;
; X64-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
; X64: # %bb.0:
@@ -247,24 +234,24 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
}
define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4 x i32> %a1) nounwind {
-; X32-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
-; X32: # %bb.0:
-; X32-NEXT: pushl %eax
-; X32-NEXT: vpsrlq $60, %xmm0, %xmm2
-; X32-NEXT: vpsrlq $61, %xmm0, %xmm0
-; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [4,0,8,0]
-; X32-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; X32-NEXT: vpsubq %xmm2, %xmm0, %xmm0
-; X32-NEXT: vpmovsxdq %xmm1, %xmm1
-; X32-NEXT: vpand %xmm1, %xmm0, %xmm2
-; X32-NEXT: vpor %xmm1, %xmm2, %xmm1
-; X32-NEXT: vpxor %xmm0, %xmm1, %xmm0
-; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-NEXT: vmovss %xmm0, (%esp)
-; X32-NEXT: flds (%esp)
-; X32-NEXT: popl %eax
-; X32-NEXT: retl
+; X86-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: vpsrlq $60, %xmm0, %xmm2
+; X86-NEXT: vpsrlq $61, %xmm0, %xmm0
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; X86-NEXT: vmovdqa {{.*#+}} xmm2 = [4,0,8,0]
+; X86-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; X86-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; X86-NEXT: vpmovsxdq %xmm1, %xmm1
+; X86-NEXT: vpand %xmm1, %xmm0, %xmm2
+; X86-NEXT: vpor %xmm1, %xmm2, %xmm1
+; X86-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-NEXT: vmovss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
;
; X64-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
; X64: # %bb.0:
@@ -292,38 +279,38 @@ define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4
}
define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2, <4 x i32> %a3) nounwind {
-; X32-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
-; X32: # %bb.0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-16, %esp
-; X32-NEXT: subl $16, %esp
-; X32-NEXT: vpmovsxdq 8(%ebp), %xmm3
-; X32-NEXT: vpmovsxdq 16(%ebp), %xmm4
-; X32-NEXT: vpsrad $31, %xmm2, %xmm5
-; X32-NEXT: vpsrad $1, %xmm2, %xmm6
-; X32-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; X32-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7]
-; X32-NEXT: vextractf128 $1, %ymm2, %xmm2
-; X32-NEXT: vpsrad $31, %xmm2, %xmm6
-; X32-NEXT: vpsrad $1, %xmm2, %xmm2
-; X32-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; X32-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2,3],xmm2[4,5],xmm6[6,7]
-; X32-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm6
-; X32-NEXT: vblendvpd %xmm6, %xmm5, %xmm3, %xmm3
-; X32-NEXT: vextractf128 $1, %ymm1, %xmm1
-; X32-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X32-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
-; X32-NEXT: vblendvpd %xmm0, %xmm2, %xmm4, %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; X32-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X32-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X32-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: vpmovsxdq 8(%ebp), %xmm3
+; X86-NEXT: vpmovsxdq 16(%ebp), %xmm4
+; X86-NEXT: vpsrad $31, %xmm2, %xmm5
+; X86-NEXT: vpsrad $1, %xmm2, %xmm6
+; X86-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; X86-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7]
+; X86-NEXT: vextractf128 $1, %ymm2, %xmm2
+; X86-NEXT: vpsrad $31, %xmm2, %xmm6
+; X86-NEXT: vpsrad $1, %xmm2, %xmm2
+; X86-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2,3],xmm2[4,5],xmm6[6,7]
+; X86-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm6
+; X86-NEXT: vblendvpd %xmm6, %xmm5, %xmm3, %xmm3
+; X86-NEXT: vextractf128 $1, %ymm1, %xmm1
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X86-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; X86-NEXT: vblendvpd %xmm0, %xmm2, %xmm4, %xmm0
+; X86-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; X86-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X86-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
; X64: # %bb.0:
@@ -366,20 +353,20 @@ define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x
; v32i1->v32i8 promotion and the splitting of v32i8 into 2xv16i8. This requires
; ComputeNumSignBits handling for insert_subvector.
define void @cross_bb_signbits_insert_subvec(<32 x i8>* %ptr, <32 x i8> %x, <32 x i8> %z) {
-; X32-LABEL: cross_bb_signbits_insert_subvec:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vextractf128 $1, %ymm0, %xmm2
-; X32-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; X32-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
-; X32-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0
-; X32-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; X32-NEXT: vandnps %ymm1, %ymm0, %ymm1
-; X32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
-; X32-NEXT: vorps %ymm1, %ymm0, %ymm0
-; X32-NEXT: vmovaps %ymm0, (%eax)
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: cross_bb_signbits_insert_subvec:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X86-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; X86-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
+; X86-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0
+; X86-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-NEXT: vandnps %ymm1, %ymm0, %ymm1
+; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vorps %ymm1, %ymm0, %ymm0
+; X86-NEXT: vmovaps %ymm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: cross_bb_signbits_insert_subvec:
; X64: # %bb.0:
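A closing note on the common CHECK prefix this patch introduces: FileCheck's --check-prefixes option lets a single invocation match several prefixes at once, so where both RUN configurations produce identical code (as in signbits_sext_shuffle_sitofp above) the update script collapses the two per-target blocks into one CHECK block, using the regex ret{{[l|q]}} to absorb the one remaining retl/retq difference. A minimal hand-written sketch of the pattern; the function below is illustrative and not part of the patch:

    ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86
    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64

    define <4 x float> @example(<4 x i32> %a) nounwind {
    ; Both targets should emit the same conversion, so one CHECK block can
    ; serve both RUN lines; the regex covers retl (X86) and retq (X64).
    ; CHECK-LABEL: example:
    ; CHECK:       vcvtdq2ps %xmm0, %xmm0
    ; CHECK:       ret{{[l|q]}}
      %r = sitofp <4 x i32> %a to <4 x float>
      ret <4 x float> %r
    }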