[llvm] r294952 - [X86][SSE] Add v4f32 and v2f64 extract to store tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 13 06:20:13 PST 2017
Author: rksimon
Date: Mon Feb 13 08:20:13 2017
New Revision: 294952
URL: http://llvm.org/viewvc/llvm-project?rev=294952&view=rev
Log:
[X86][SSE] Add v4f32 and v2f64 extract to store tests
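
The new cases pin down the expected lowering for storing a single lane of a
float vector: the low f32/f64 lane goes out through a plain (v)movss/(v)movlps,
the high f64 lane through (v)movhpd, and f32 lane 3 through extractps on
SSE4.1/AVX, with SSE2 falling back to a shufps to move the lane down followed
by a movss. As a sketch of how to exercise just this test against a built tree
(the build path and check prefixes below are assumptions; the authoritative
RUN lines are at the top of the test file itself):

  ./build/bin/llvm-lit -v llvm/test/CodeGen/X86/extract-store.ll

or, driving llc and FileCheck by hand for the 64-bit SSE2 configuration, with
prefixes matching the labels in the diff below:

  ./build/bin/llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 \
      < llvm/test/CodeGen/X86/extract-store.ll | \
    ./build/bin/FileCheck llvm/test/CodeGen/X86/extract-store.ll \
      --check-prefix=X64 --check-prefix=SSE-X64 --check-prefix=SSE2-X64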
Modified:
llvm/trunk/test/CodeGen/X86/extract-store.ll
Modified: llvm/trunk/test/CodeGen/X86/extract-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extract-store.ll?rev=294952&r1=294951&r2=294952&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extract-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extract-store.ll Mon Feb 13 08:20:13 2017
@@ -359,6 +359,127 @@ define void @extract_i64_1(i64* nocaptur
   ret void
 }
+define void @extract_f32_0(float* nocapture %dst, <4 x float> %foo) nounwind {
+; SSE-X32-LABEL: extract_f32_0:
+; SSE-X32: # BB#0:
+; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE-X32-NEXT: movss %xmm0, (%eax)
+; SSE-X32-NEXT: retl
+;
+; SSE-X64-LABEL: extract_f32_0:
+; SSE-X64: # BB#0:
+; SSE-X64-NEXT: movss %xmm0, (%rdi)
+; SSE-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_f32_0:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vmovss %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_f32_0:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vmovss %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <4 x float> %foo, i32 0
+ store float %vecext, float* %dst, align 1
+ ret void
+}
+
+define void @extract_f32_3(float* nocapture %dst, <4 x float> %foo) nounwind {
+; SSE2-X32-LABEL: extract_f32_3:
+; SSE2-X32: # BB#0:
+; SSE2-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE2-X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-X32-NEXT: movss %xmm0, (%eax)
+; SSE2-X32-NEXT: retl
+;
+; SSE2-X64-LABEL: extract_f32_3:
+; SSE2-X64: # BB#0:
+; SSE2-X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-X64-NEXT: movss %xmm0, (%rdi)
+; SSE2-X64-NEXT: retq
+;
+; SSE41-X32-LABEL: extract_f32_3:
+; SSE41-X32: # BB#0:
+; SSE41-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE41-X32-NEXT: extractps $3, %xmm0, (%eax)
+; SSE41-X32-NEXT: retl
+;
+; SSE41-X64-LABEL: extract_f32_3:
+; SSE41-X64: # BB#0:
+; SSE41-X64-NEXT: extractps $3, %xmm0, (%rdi)
+; SSE41-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_f32_3:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vextractps $3, %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_f32_3:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vextractps $3, %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <4 x float> %foo, i32 3
+ store float %vecext, float* %dst, align 1
+ ret void
+}
+
+define void @extract_f64_0(double* nocapture %dst, <2 x double> %foo) nounwind {
+; SSE-X32-LABEL: extract_f64_0:
+; SSE-X32: # BB#0:
+; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE-X32-NEXT: movlps %xmm0, (%eax)
+; SSE-X32-NEXT: retl
+;
+; SSE-X64-LABEL: extract_f64_0:
+; SSE-X64: # BB#0:
+; SSE-X64-NEXT: movlps %xmm0, (%rdi)
+; SSE-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_f64_0:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vmovlps %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_f64_0:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vmovlps %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <2 x double> %foo, i32 0
+ store double %vecext, double* %dst, align 1
+ ret void
+}
+
+define void @extract_f64_1(double* nocapture %dst, <2 x double> %foo) nounwind {
+; SSE-X32-LABEL: extract_f64_1:
+; SSE-X32: # BB#0:
+; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE-X32-NEXT: movhpd %xmm0, (%eax)
+; SSE-X32-NEXT: retl
+;
+; SSE-X64-LABEL: extract_f64_1:
+; SSE-X64: # BB#0:
+; SSE-X64-NEXT: movhpd %xmm0, (%rdi)
+; SSE-X64-NEXT: retq
+;
+; AVX-X32-LABEL: extract_f64_1:
+; AVX-X32: # BB#0:
+; AVX-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-X32-NEXT: vmovhpd %xmm0, (%eax)
+; AVX-X32-NEXT: retl
+;
+; AVX-X64-LABEL: extract_f64_1:
+; AVX-X64: # BB#0:
+; AVX-X64-NEXT: vmovhpd %xmm0, (%rdi)
+; AVX-X64-NEXT: retq
+ %vecext = extractelement <2 x double> %foo, i32 1
+ store double %vecext, double* %dst, align 1
+ ret void
+}
+
 define void @extract_i8_undef(i8* nocapture %dst, <16 x i8> %foo) nounwind {
 ; X32-LABEL: extract_i8_undef:
 ; X32: # BB#0:
@@ -410,3 +531,29 @@ define void @extract_i64_undef(i64* noca
   store i64 %vecext, i64* %dst, align 1
   ret void
 }
+
+define void @extract_f32_undef(float* nocapture %dst, <4 x float> %foo) nounwind {
+; X32-LABEL: extract_f32_undef:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: extract_f32_undef:
+; X64: # BB#0:
+; X64-NEXT: retq
+ %vecext = extractelement <4 x float> %foo, i32 6 ; undef
+ store float %vecext, float* %dst, align 1
+ ret void
+}
+
+define void @extract_f64_undef(double* nocapture %dst, <2 x double> %foo) nounwind {
+; X32-LABEL: extract_f64_undef:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: extract_f64_undef:
+; X64: # BB#0:
+; X64-NEXT: retq
+ %vecext = extractelement <2 x double> %foo, i32 2 ; undef
+ store double %vecext, double* %dst, align 1
+ ret void
+}
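
For the *_undef cases, an out-of-range extractelement index yields an
undefined value, so codegen is free to drop the store entirely and the checks
expect nothing but the bare return. A minimal standalone sketch of the same
folding (the function name here is illustrative, not from the commit):

  ; Lane 2 of a <2 x double> does not exist, so %e is undefined and
  ; the store may be eliminated; llc emits just the ret.
  define void @undef_lane_sketch(double* %dst, <2 x double> %v) nounwind {
    %e = extractelement <2 x double> %v, i32 2
    store double %e, double* %dst, align 1
    ret void
  }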