[llvm] r365266 - [X86] Add patterns to select MOVLPDrm from MOVSD+load and MOVHPD from UNPCKL+load.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sat Jul 6 10:59:45 PDT 2019
Author: ctopper
Date: Sat Jul 6 10:59:45 2019
New Revision: 365266
URL: http://llvm.org/viewvc/llvm-project?rev=365266&view=rev
Log:
[X86] Add patterns to select MOVLPDrm from MOVSD+load and MOVHPD from UNPCKL+load.
These narrow the load so we can only do it if the load isn't
volatile.
There are also tests in vector-shuffle-128-v4.ll that this should
support, but we don't seem to fold bitcast+load on pre-sse4.2
targets due to the slow unaligned mem 16 flag.
Modified:
llvm/trunk/lib/Target/X86/X86InstrSSE.td
llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=365266&r1=365265&r2=365266&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Sat Jul 6 10:59:45 2019
@@ -748,6 +748,13 @@ let Predicates = [UseSSE2] in {
(MOVLPDrm VR128:$src1, addr:$src2)>;
}
+let Predicates = [UseSSE2, NoSSE41_Or_OptForSize] in {
+ // Use MOVLPD to load into the low bits from a full vector unless we can use
+ // BLENDPD.
+ def : Pat<(X86Movsd VR128:$src1, (v2f64 (nonvolatile_load addr:$src2))),
+ (MOVLPDrm VR128:$src1, addr:$src2)>;
+}
+
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//
@@ -2075,6 +2082,13 @@ let Predicates = [HasAVX1Only] in {
(VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
}
+let Predicates = [UseSSE2] in {
+ // Use MOVHPD if the load isn't aligned enough for UNPCKLPD.
+ def : Pat<(v2f64 (X86Unpckl VR128:$src1,
+ (v2f64 (nonvolatile_load addr:$src2)))),
+ (MOVHPDrm VR128:$src1, addr:$src2)>;
+}
+
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Extract Floating-Point Sign mask
//===----------------------------------------------------------------------===//
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll?rev=365266&r1=365265&r2=365266&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll Sat Jul 6 10:59:45 2019
@@ -1309,8 +1309,7 @@ define <2 x double> @shuffle_mem_v2f64_3
define <2 x double> @shuffle_mem_v2f64_02(<2 x double> %a, <2 x double>* %pb) {
; SSE-LABEL: shuffle_mem_v2f64_02:
; SSE: # %bb.0:
-; SSE-NEXT: movups (%rdi), %xmm1
-; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; SSE-NEXT: retq
;
; AVX-LABEL: shuffle_mem_v2f64_02:
@@ -1325,20 +1324,17 @@ define <2 x double> @shuffle_mem_v2f64_0
define <2 x double> @shuffle_mem_v2f64_21(<2 x double> %a, <2 x double>* %pb) {
; SSE2-LABEL: shuffle_mem_v2f64_21:
; SSE2: # %bb.0:
-; SSE2-NEXT: movupd (%rdi), %xmm1
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE3-LABEL: shuffle_mem_v2f64_21:
; SSE3: # %bb.0:
-; SSE3-NEXT: movupd (%rdi), %xmm1
-; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE3-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: shuffle_mem_v2f64_21:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movupd (%rdi), %xmm1
-; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSSE3-NEXT: movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_mem_v2f64_21:
More information about the llvm-commits
mailing list