[llvm-branch-commits] [llvm] 0ca81b9 - [X86][SSE] Add uitofp(trunc(and(lshr(x, c)))) vector test
Simon Pilgrim via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Thu Jan 21 04:43:49 PST 2021
Author: Simon Pilgrim
Date: 2021-01-21T12:38:36Z
New Revision: 0ca81b90d19d395c4891b7507cec0f063dd26d22
URL: https://github.com/llvm/llvm-project/commit/0ca81b90d19d395c4891b7507cec0f063dd26d22
DIFF: https://github.com/llvm/llvm-project/commit/0ca81b90d19d395c4891b7507cec0f063dd26d22.diff
LOG: [X86][SSE] Add uitofp(trunc(and(lshr(x,c)))) vector test
Reduced from a regression reported by @hans on D56387
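
For reference, the pattern named in the title reduces to a shift, mask, and unsigned-convert chain, where the mask to 0xFFFF supplies the implicit truncate. A minimal scalar sketch of that shape (illustrative only, not part of this commit; the committed test uses the <4 x i64> vector form in the diff below):

define float @sketch(i64 %x) {
  %shr  = lshr i64 %x, 16           ; lshr(x, c) with c = 16
  %mask = and i64 %shr, 65535       ; and(..., 0xFFFF) masks to 16 bits
  %cvt  = uitofp i64 %mask to float ; uitofp of a value known to be < 2^16
  ret float %cvt
}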
Added:
Modified:
llvm/test/CodeGen/X86/uint_to_fp-3.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/uint_to_fp-3.ll b/llvm/test/CodeGen/X86/uint_to_fp-3.ll
index ca46b48b7731..5f1c3ec69a34 100644
--- a/llvm/test/CodeGen/X86/uint_to_fp-3.ll
+++ b/llvm/test/CodeGen/X86/uint_to_fp-3.ll
@@ -69,3 +69,64 @@ define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
%cvt = uitofp <4 x i32> %and to <4 x double>
ret <4 x double> %cvt
}
+
+; Regression noticed in D56387
+define <4 x float> @lshr_truncate_mask_ucvt_4i64_4f32(<4 x i64> *%p0) {
+; X32-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
+; X32-SSE: # %bb.0:
+; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE-NEXT: movdqu (%eax), %xmm0
+; X32-SSE-NEXT: movdqu 16(%eax), %xmm1
+; X32-SSE-NEXT: psrlq $16, %xmm1
+; X32-SSE-NEXT: psrlq $16, %xmm0
+; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
+; X32-SSE-NEXT: mulps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: retl
+;
+; X32-AVX-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
+; X32-AVX: # %bb.0:
+; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT: vmovdqu (%eax), %xmm0
+; X32-AVX-NEXT: vmovdqu 16(%eax), %xmm1
+; X32-AVX-NEXT: vpsrlq $16, %xmm1, %xmm1
+; X32-AVX-NEXT: vpsrlq $16, %xmm0, %xmm0
+; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X32-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X32-AVX-NEXT: vmulps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE-NEXT: psrlq $16, %xmm1
+; X64-SSE-NEXT: psrlq $16, %xmm0
+; X64-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
+; X64-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
+; X64-SSE-NEXT: mulps {{.*}}(%rip), %xmm0
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX-NEXT: vpsrlq $16, %xmm1, %xmm1
+; X64-AVX-NEXT: vpsrlq $16, %xmm0, %xmm0
+; X64-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X64-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; X64-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+ %load = load <4 x i64>, <4 x i64>* %p0, align 2
+ %lshr = lshr <4 x i64> %load, <i64 16, i64 16, i64 16, i64 16>
+ %and = and <4 x i64> %lshr, <i64 65535, i64 65535, i64 65535, i64 65535>
+ %uitofp = uitofp <4 x i64> %and to <4 x float>
+ %fmul = fmul <4 x float> %uitofp, <float 0x3EF0001000000000, float 0x3EF0001000000000, float 0x3EF0001000000000, float 0x3EF0001000000000>
+ ret <4 x float> %fmul
+}
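
Note that all four CHECK prefixes above lower the conversion with cvtdq2ps/vcvtdq2ps, the signed 32-bit convert: once each element is masked to 16 bits it is a non-negative value that fits in an i32, so no unsigned i64 conversion sequence is needed. A minimal sketch of an equivalent narrowed IR form under that reading (an illustrative assumption, not taken from the commit):

define <4 x float> @narrowed_sketch(<4 x i64> %x) {
  %shr = lshr <4 x i64> %x, <i64 16, i64 16, i64 16, i64 16>
  %tr  = trunc <4 x i64> %shr to <4 x i16>    ; the trunc from the commit title
  %ext = zext <4 x i16> %tr to <4 x i32>      ; equivalent to the and with 65535
  %cvt = sitofp <4 x i32> %ext to <4 x float> ; signed convert is safe: inputs < 2^16
  ret <4 x float> %cvt
}

In the AVX paths the vpxor/vpblendw pair against a zero register plays the same masking role as the andps constant in the SSE paths, zeroing the high 16 bits of each 32-bit lane.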