[llvm] r223929 - Match new shuffle codegen for MOVHPD patterns
Sanjay Patel
spatel at rotateright.com
Wed Dec 10 08:58:55 PST 2014
Author: spatel
Date: Wed Dec 10 10:58:54 2014
New Revision: 223929
URL: http://llvm.org/viewvc/llvm-project?rev=223929&view=rev
Log:
Match new shuffle codegen for MOVHPD patterns
Add patterns to match SSE (shufpd) and AVX (vpermilpd) shuffle codegen
when storing the high element of a v2f64. The existing patterns were
only checking for an unpckh type of shuffle.
http://llvm.org/bugs/show_bug.cgi?id=21791
Differential Revision: http://reviews.llvm.org/D6586
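For context, a minimal IR sketch (not part of this commit; the function name is made up) of the
extract-and-store case these patterns target. With the new shuffle lowering, the high lane is
moved to lane 0 via shufpd (SSE) or vpermilpd (AVX) before the scalar extract, and the added
patterns should fold that whole sequence into a single movhpd / vmovhpd store:

  ; Hypothetical example: store the high f64 lane of a <2 x double>.
  define void @store_high_f64(<2 x double> %v, double* %p) {
    %hi = extractelement <2 x double> %v, i32 1
    store double %hi, double* %p
    ret void
  }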
Modified:
llvm/trunk/lib/Target/X86/X86InstrSSE.td
llvm/trunk/test/CodeGen/X86/extractelement-load.ll
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=223929&r1=223928&r2=223929&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Wed Dec 10 10:58:54 2014
@@ -1332,6 +1332,8 @@ let Predicates = [HasAVX] in {
(bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
(VMOVHPSrm VR128:$src1, addr:$src2)>;
+ // VMOVHPD patterns
+
// FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
// is during lowering, where it's not possible to recognize the load fold
// cause it has two uses through a bitcast. One use disappears at isel time
@@ -1344,6 +1346,11 @@ let Predicates = [HasAVX] in {
def : Pat<(v2f64 (X86Unpckl VR128:$src1,
(bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
(VMOVHPDrm VR128:$src1, addr:$src2)>;
+
+ def : Pat<(store (f64 (vector_extract
+ (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
+ (iPTR 0))), addr:$dst),
+ (VMOVHPDmr addr:$dst, VR128:$src)>;
}
let Predicates = [UseSSE1] in {
@@ -1357,6 +1364,8 @@ let Predicates = [UseSSE1] in {
}
let Predicates = [UseSSE2] in {
+ // MOVHPD patterns
+
// FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
// is during lowering, where it's not possible to recognize the load fold
// cause it has two uses through a bitcast. One use disappears at isel time
@@ -1369,6 +1378,11 @@ let Predicates = [UseSSE2] in {
def : Pat<(v2f64 (X86Unpckl VR128:$src1,
(bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
(MOVHPDrm VR128:$src1, addr:$src2)>;
+
+ def : Pat<(store (f64 (vector_extract
+ (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
+ (iPTR 0))), addr:$dst),
+ (MOVHPDmr addr:$dst, VR128:$src)>;
}
//===----------------------------------------------------------------------===//
Modified: llvm/trunk/test/CodeGen/X86/extractelement-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extractelement-load.ll?rev=223929&r1=223928&r2=223929&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extractelement-load.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extractelement-load.ll Wed Dec 10 10:58:54 2014
@@ -1,5 +1,6 @@
; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | FileCheck %s
; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mattr=+avx -mcpu=btver2 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -29,16 +30,15 @@ undef, i32 7, i32 9, i32 undef, i32 13,
; This case could easily end up inf-looping in the DAG combiner due to a
; low-alignment load of the vector, which prevents us from reliably forming a
; narrow load.
-; FIXME: It would be nice to detect whether the target has fast and legal
-; unaligned loads and use them here.
+
+; The expected codegen is identical for the AVX case except
+; load/store instructions will have a leading 'v', so we don't
+; need to special-case the checks.
+
define void @t3() {
; CHECK-LABEL: t3:
-;
-; This movs the entire vector, shuffling the high double down. If we fixed the
-; FIXME above it would just move the high double directly.
; CHECK: movupd
-; CHECK: shufpd
-; CHECK: movlpd
+; CHECK: movhpd
bb:
%tmp13 = load <2 x double>* undef, align 1