[llvm] r280214 - [X86][SSE] Improve awareness of fptrunc implicit zeroing of upper 64-bits of xmm result
Simon Pilgrim via llvm-commits
llvm-commits@lists.llvm.org
Wed Aug 31 03:35:14 PDT 2016
Author: rksimon
Date: Wed Aug 31 05:35:13 2016
New Revision: 280214
URL: http://llvm.org/viewvc/llvm-project?rev=280214&view=rev
Log:
[X86][SSE] Improve awareness of fptrunc implicit zeroing of upper 64-bits of xmm result
Add patterns to avoid inserting unnecessary zeroing shuffles when lowering fptrunc to (v)cvtpd2ps
Differential Revision: https://reviews.llvm.org/D23797
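
For reference, the IR shape these patterns target (taken from the updated vec_fptrunc.ll tests below) is a <2 x double> -> <2 x float> fptrunc whose result is widened to four floats by an explicit zeroing shufflevector:

  %cvt = fptrunc <2 x double> %arg to <2 x float>
  %ret = shufflevector <2 x float> %cvt, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>

Since (v)cvtpd2ps already zeroes the upper 64 bits of its xmm destination, the shuffle is redundant and the conversion can now be selected on its own.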
Modified:
llvm/trunk/lib/Target/X86/X86InstrSSE.td
llvm/trunk/test/CodeGen/X86/vec_fptrunc.ll
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=280214&r1=280213&r2=280214&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Wed Aug 31 05:35:13 2016
@@ -2288,6 +2288,9 @@ let Predicates = [HasAVX] in {
let Predicates = [HasAVX, NoVLX] in {
// Match fpround and fpextend for 128/256-bit conversions
+ def : Pat<(v4f32 (bitconvert (X86vzmovl (v2f64 (bitconvert
+ (v4f32 (X86vfpround (v2f64 VR128:$src)))))))),
+ (VCVTPD2PSrr VR128:$src)>;
def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
(VCVTPD2PSrr VR128:$src)>;
def : Pat<(v4f32 (X86vfpround (loadv2f64 addr:$src))),
(VCVTPD2PSXrm addr:$src)>;
@@ -2307,6 +2310,9 @@ let Predicates = [HasAVX, NoVLX] in {
let Predicates = [UseSSE2] in {
// Match fpround and fpextend for 128 conversions
+ def : Pat<(v4f32 (bitconvert (X86vzmovl (v2f64 (bitconvert
+ (v4f32 (X86vfpround (v2f64 VR128:$src)))))))),
+ (CVTPD2PSrr VR128:$src)>;
def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
(CVTPD2PSrr VR128:$src)>;
def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
(CVTPD2PSrm addr:$src)>;
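
The DAG shape the new Pat entries match is, roughly, the chain below (node numbering is illustrative, reconstructed from the pattern source above):

  t1: v4f32 = X86vfpround t0   ; the fptrunc of the v2f64 source
  t2: v2f64 = bitcast t1
  t3: v2f64 = X86vzmovl t2     ; the zeroing shuffle, as a move that zero-extends the low 64 bits
  t4: v4f32 = bitcast t3

Both the AVX (NoVLX) and SSE2 variants now select this whole chain to a lone (V)CVTPD2PSrr, dropping the X86vzmovl that previously became the separate (v)movq seen in the old test checks below.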
Modified: llvm/trunk/test/CodeGen/X86/vec_fptrunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_fptrunc.ll?rev=280214&r1=280213&r2=280214&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_fptrunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_fptrunc.ll Wed Aug 31 05:35:13 2016
@@ -135,62 +135,54 @@ entry:
define <4 x float> @fptrunc_frommem2_zext(<2 x double> * %ld) {
; X32-SSE-LABEL: fptrunc_frommem2_zext:
-; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: cvtpd2ps (%eax), %xmm0
-; X32-SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
-; X32-SSE-NEXT: retl
-;
-; X32-AVX-LABEL: fptrunc_frommem2_zext:
-; X32-AVX: # BB#0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vcvtpd2psx (%eax), %xmm0
-; X32-AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; X32-AVX-NEXT: retl
-;
-; X64-SSE-LABEL: fptrunc_frommem2_zext:
-; X64-SSE: # BB#0:
-; X64-SSE-NEXT: cvtpd2ps (%rdi), %xmm0
-; X64-SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
-; X64-SSE-NEXT: retq
-;
-; X64-AVX-LABEL: fptrunc_frommem2_zext:
-; X64-AVX: # BB#0:
-; X64-AVX-NEXT: vcvtpd2psx (%rdi), %xmm0
-; X64-AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; X64-AVX-NEXT: retq
- %arg = load <2 x double>, <2 x double> * %ld, align 16
- %cvt = fptrunc <2 x double> %arg to <2 x float>
+; X32-SSE: # BB#0:
+; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-SSE-NEXT: cvtpd2ps (%eax), %xmm0
+; X32-SSE-NEXT: retl
+;
+; X32-AVX-LABEL: fptrunc_frommem2_zext:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT: vcvtpd2psx (%eax), %xmm0
+; X32-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fptrunc_frommem2_zext:
+; X64-SSE: # BB#0:
+; X64-SSE-NEXT: cvtpd2ps (%rdi), %xmm0
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fptrunc_frommem2_zext:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vcvtpd2psx (%rdi), %xmm0
+; X64-AVX-NEXT: retq
+ %arg = load <2 x double>, <2 x double> * %ld, align 16
+ %cvt = fptrunc <2 x double> %arg to <2 x float>
%ret = shufflevector <2 x float> %cvt, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
ret <4 x float> %ret
}
define <4 x float> @fptrunc_fromreg2_zext(<2 x double> %arg) {
-; X32-SSE-LABEL: fptrunc_fromreg2_zext:
-; X32-SSE: # BB#0:
-; X32-SSE-NEXT: cvtpd2ps %xmm0, %xmm0
-; X32-SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
-; X32-SSE-NEXT: retl
-;
-; X32-AVX-LABEL: fptrunc_fromreg2_zext:
-; X32-AVX: # BB#0:
-; X32-AVX-NEXT: vcvtpd2ps %xmm0, %xmm0
-; X32-AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; X32-AVX-NEXT: retl
-;
-; X64-SSE-LABEL: fptrunc_fromreg2_zext:
-; X64-SSE: # BB#0:
-; X64-SSE-NEXT: cvtpd2ps %xmm0, %xmm0
-; X64-SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
-; X64-SSE-NEXT: retq
-;
-; X64-AVX-LABEL: fptrunc_fromreg2_zext:
-; X64-AVX: # BB#0:
-; X64-AVX-NEXT: vcvtpd2ps %xmm0, %xmm0
-; X64-AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; X64-AVX-NEXT: retq
- %cvt = fptrunc <2 x double> %arg to <2 x float>
- %ret = shufflevector <2 x float> %cvt, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
+; X32-SSE-LABEL: fptrunc_fromreg2_zext:
+; X32-SSE: # BB#0:
+; X32-SSE-NEXT: cvtpd2ps %xmm0, %xmm0
+; X32-SSE-NEXT: retl
+;
+; X32-AVX-LABEL: fptrunc_fromreg2_zext:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: vcvtpd2ps %xmm0, %xmm0
+; X32-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fptrunc_fromreg2_zext:
+; X64-SSE: # BB#0:
+; X64-SSE-NEXT: cvtpd2ps %xmm0, %xmm0
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fptrunc_fromreg2_zext:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vcvtpd2ps %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+ %cvt = fptrunc <2 x double> %arg to <2 x float>
+ %ret = shufflevector <2 x float> %cvt, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
ret <4 x float> %ret
}