[llvm] r351354 - [x86] add tests for extracted scalar casts (PR39974); NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 16 08:11:30 PST 2019


Author: spatel
Date: Wed Jan 16 08:11:30 2019
New Revision: 351354

URL: http://llvm.org/viewvc/llvm-project?rev=351354&view=rev
Log:
[x86] add tests for extracted scalar casts (PR39974); NFC

https://bugs.llvm.org/show_bug.cgi?id=39974
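
For context, the new tests exercise the pattern where a scalar element is
extracted from an integer vector and then converted to floating point. A
minimal sketch of that IR shape (this is the element-0 signed case added
below; the diff also adds unsigned, f64, and non-zero-element variants):

  define float @extract0_sitofp_v4i32_f32(<4 x i32> %x) nounwind {
    %e = extractelement <4 x i32> %x, i32 0
    %r = sitofp i32 %e to float
    ret float %r
  }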


Modified:
    llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll

Modified: llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll?rev=351354&r1=351353&r2=351354&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll Wed Jan 16 08:11:30 2019
@@ -5550,3 +5550,208 @@ define <4 x float> @sitofp_i64_to_4f32(<
   %res = insertelement <4 x float> %a0, float %cvt, i32 0
   ret <4 x float> %res
 }
+
+; Extract from int vector and convert to FP.
+
+define float @extract0_sitofp_v4i32_f32(<4 x i32> %x) nounwind {
+; SSE-LABEL: extract0_sitofp_v4i32_f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract0_sitofp_v4i32_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 0
+  %r = sitofp i32 %e to float
+  ret float %r
+}
+
+define double @extract0_sitofp_v4i32_f64(<4 x i32> %x) nounwind {
+; SSE-LABEL: extract0_sitofp_v4i32_f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: extract0_sitofp_v4i32_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    vcvtsi2sdl %eax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 0
+  %r = sitofp i32 %e to double
+  ret double %r
+}
+
+define float @extract0_uitofp_v4i32_f32(<4 x i32> %x) nounwind {
+; SSE-LABEL: extract0_uitofp_v4i32_f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; VEX-LABEL: extract0_uitofp_v4i32_f32:
+; VEX:       # %bb.0:
+; VEX-NEXT:    vmovd %xmm0, %eax
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: extract0_uitofp_v4i32_f32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    vcvtusi2ssl %eax, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 0
+  %r = uitofp i32 %e to float
+  ret float %r
+}
+
+define double @extract0_uitofp_v4i32_f64(<4 x i32> %x) nounwind {
+; SSE-LABEL: extract0_uitofp_v4i32_f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; VEX-LABEL: extract0_uitofp_v4i32_f64:
+; VEX:       # %bb.0:
+; VEX-NEXT:    vmovd %xmm0, %eax
+; VEX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: extract0_uitofp_v4i32_f64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    vcvtusi2sdl %eax, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 0
+  %r = uitofp i32 %e to double
+  ret double %r
+}
+
+; Extract non-zero element from int vector and convert to FP.
+
+define float @extract3_sitofp_v4i32_f32(<4 x i32> %x) nounwind {
+; SSE2-LABEL: extract3_sitofp_v4i32_f32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract3_sitofp_v4i32_f32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    extractps $3, %xmm0, %eax
+; SSE41-NEXT:    xorps %xmm0, %xmm0
+; SSE41-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract3_sitofp_v4i32_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 3
+  %r = sitofp i32 %e to float
+  ret float %r
+}
+
+define double @extract3_sitofp_v4i32_f64(<4 x i32> %x) nounwind {
+; SSE2-LABEL: extract3_sitofp_v4i32_f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract3_sitofp_v4i32_f64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    extractps $3, %xmm0, %eax
+; SSE41-NEXT:    xorps %xmm0, %xmm0
+; SSE41-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: extract3_sitofp_v4i32_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-NEXT:    vcvtsi2sdl %eax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 3
+  %r = sitofp i32 %e to double
+  ret double %r
+}
+
+define float @extract3_uitofp_v4i32_f32(<4 x i32> %x) nounwind {
+; SSE2-LABEL: extract3_uitofp_v4i32_f32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract3_uitofp_v4i32_f32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    extractps $3, %xmm0, %eax
+; SSE41-NEXT:    xorps %xmm0, %xmm0
+; SSE41-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE41-NEXT:    retq
+;
+; VEX-LABEL: extract3_uitofp_v4i32_f32:
+; VEX:       # %bb.0:
+; VEX-NEXT:    vextractps $3, %xmm0, %eax
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: extract3_uitofp_v4i32_f32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vextractps $3, %xmm0, %eax
+; AVX512-NEXT:    vcvtusi2ssl %eax, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 3
+  %r = uitofp i32 %e to float
+  ret float %r
+}
+
+define double @extract3_uitofp_v4i32_f64(<4 x i32> %x) nounwind {
+; SSE2-LABEL: extract3_uitofp_v4i32_f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: extract3_uitofp_v4i32_f64:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    extractps $3, %xmm0, %eax
+; SSE41-NEXT:    xorps %xmm0, %xmm0
+; SSE41-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE41-NEXT:    retq
+;
+; VEX-LABEL: extract3_uitofp_v4i32_f64:
+; VEX:       # %bb.0:
+; VEX-NEXT:    vextractps $3, %xmm0, %eax
+; VEX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: extract3_uitofp_v4i32_f64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vextractps $3, %xmm0, %eax
+; AVX512-NEXT:    vcvtusi2sdl %eax, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+  %e = extractelement <4 x i32> %x, i32 3
+  %r = uitofp i32 %e to double
+  ret double %r
+}
+