[llvm-commits] [llvm] r163196 - /llvm/trunk/lib/Target/X86/X86InstrSSE.td

Craig Topper craig.topper at gmail.com
Tue Sep 4 23:58:39 PDT 2012


Author: ctopper
Date: Wed Sep  5 01:58:39 2012
New Revision: 163196

URL: http://llvm.org/viewvc/llvm-project?rev=163196&view=rev
Log:
Add patterns for integer forms of VINSERTF128/VINSERTI128 folded with loads. Also add patterns to turn subvector inserts of loads at index 0 of an undef vector into VMOVAPS.
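
As an illustration (not part of the commit), here is a minimal C sketch, using AVX intrinsics, of the kind of code the second set of patterns targets: a 128-bit aligned load widened into the low half of an otherwise undefined 256-bit vector, which can now be selected as a single VMOVAPS xmm load. The function name widen_load is hypothetical, and the exact instruction chosen depends on the compiler version and flags (-mavx assumed).

    #include <immintrin.h>

    /* Hypothetical example: widen an aligned 16-byte load into the low
     * lane of a 256-bit vector whose upper lane is left undefined.
     * The cast typically lowers to an insert_subvector of the load at
     * index 0 of an undef vector, which the new patterns select as a
     * plain VMOVAPSrm instead of a separate load plus insert.         */
    __m256 widen_load(const float *p) {
      __m128 lo = _mm_load_ps(p);          /* matches alignedloadv4f32  */
      return _mm256_castps128_ps256(lo);   /* upper 128 bits undefined  */
    }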

Modified:
    llvm/trunk/lib/Target/X86/X86InstrSSE.td

Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=163196&r1=163195&r2=163196&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Wed Sep  5 01:58:39 2012
@@ -1017,6 +1017,48 @@
             (VMOVUPSYmr addr:$dst, VR256:$src)>;
   def : Pat<(store (v32i8 VR256:$src), addr:$dst),
             (VMOVUPSYmr addr:$dst, VR256:$src)>;
+
+  // Special patterns for handling subvector inserts folded with loads
+  def : Pat<(insert_subvector undef, (alignedloadv4f32 addr:$src), (i32 0)),
+            (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
+                           (v4f32 (VMOVAPSrm addr:$src)), sub_xmm)>;
+  def : Pat<(insert_subvector undef, (alignedloadv2f64 addr:$src), (i32 0)),
+            (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
+                           (v2f64 (VMOVAPDrm addr:$src)), sub_xmm)>;
+  def : Pat<(insert_subvector undef, (alignedloadv2i64 addr:$src), (i32 0)),
+            (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
+                           (v2i64 (VMOVAPSrm addr:$src)), sub_xmm)>;
+  def : Pat<(insert_subvector undef,
+             (bc_v4i32 (alignedloadv2i64 addr:$src)), (i32 0)),
+            (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+                           (v4i32 (VMOVAPSrm addr:$src)), sub_xmm)>;
+  def : Pat<(insert_subvector undef,
+             (bc_v8i16 (alignedloadv2i64 addr:$src)), (i32 0)),
+            (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)),
+                           (v8i16 (VMOVAPSrm addr:$src)), sub_xmm)>;
+  def : Pat<(insert_subvector undef,
+             (bc_v16i8 (alignedloadv2i64 addr:$src)), (i32 0)),
+            (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)),
+                           (v16i8 (VMOVAPSrm addr:$src)), sub_xmm)>;
+
+  def : Pat<(insert_subvector undef, (loadv4f32 addr:$src), (i32 0)),
+            (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
+                           (v4f32 (VMOVUPSrm addr:$src)), sub_xmm)>;
+  def : Pat<(insert_subvector undef, (loadv2f64 addr:$src), (i32 0)),
+            (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
+                           (v2f64 (VMOVUPDrm addr:$src)), sub_xmm)>;
+  def : Pat<(insert_subvector undef, (loadv2i64 addr:$src), (i32 0)),
+            (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
+                           (v2i64 (VMOVUPSrm addr:$src)), sub_xmm)>;
+  def : Pat<(insert_subvector undef, (bc_v4i32 (loadv2i64 addr:$src)), (i32 0)),
+            (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+                           (v4i32 (VMOVUPSrm addr:$src)), sub_xmm)>;
+  def : Pat<(insert_subvector undef, (bc_v8i16 (loadv2i64 addr:$src)), (i32 0)),
+            (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)),
+                           (v8i16 (VMOVUPSrm addr:$src)), sub_xmm)>;
+  def : Pat<(insert_subvector undef, (bc_v16i8 (loadv2i64 addr:$src)), (i32 0)),
+            (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)),
+                           (v16i8 (VMOVUPSrm addr:$src)), sub_xmm)>;
 }
 
 // Use movaps / movups for SSE integer load / store (one byte shorter).
@@ -7221,11 +7263,11 @@
           (VINSERTF128rr VR256:$src1, VR128:$src2,
                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
 
-def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
+def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (memopv4f32 addr:$src2),
                                    (i32 imm)),
           (VINSERTF128rm VR256:$src1, addr:$src2,
                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
+def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (memopv2f64 addr:$src2),
                                    (i32 imm)),
           (VINSERTF128rm VR256:$src1, addr:$src2,
                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
@@ -7249,7 +7291,22 @@
           (VINSERTF128rr VR256:$src1, VR128:$src2,
                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
 
-def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
+def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2),
+                                   (i32 imm)),
+          (VINSERTF128rm VR256:$src1, addr:$src2,
+                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1),
+                                   (bc_v4i32 (memopv2i64 addr:$src2)),
+                                   (i32 imm)),
+          (VINSERTF128rm VR256:$src1, addr:$src2,
+                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1),
+                                   (bc_v16i8 (memopv2i64 addr:$src2)),
+                                   (i32 imm)),
+          (VINSERTF128rm VR256:$src1, addr:$src2,
+                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1),
+                                   (bc_v8i16 (memopv2i64 addr:$src2)),
                                    (i32 imm)),
           (VINSERTF128rm VR256:$src1, addr:$src2,
                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
@@ -7809,7 +7866,22 @@
           (VINSERTI128rr VR256:$src1, VR128:$src2,
                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
 
-def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
+def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2),
+                                   (i32 imm)),
+          (VINSERTI128rm VR256:$src1, addr:$src2,
+                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1),
+                                   (bc_v4i32 (memopv2i64 addr:$src2)),
+                                   (i32 imm)),
+          (VINSERTI128rm VR256:$src1, addr:$src2,
+                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1),
+                                   (bc_v16i8 (memopv2i64 addr:$src2)),
+                                   (i32 imm)),
+          (VINSERTI128rm VR256:$src1, addr:$src2,
+                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1),
+                                   (bc_v8i16 (memopv2i64 addr:$src2)),
                                    (i32 imm)),
           (VINSERTI128rm VR256:$src1, addr:$src2,
                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
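
For the first set of patterns (the integer VINSERTF128/VINSERTI128 forms), the following hedged C sketch shows code that should now fold the 128-bit load into the memory form of the instruction (VINSERTF128rm/VINSERTI128rm) rather than issuing a separate vector load. The function name insert_high is hypothetical; an aligned load is used so the access matches the memopv2i64 fragment, and the integer (AVX2) variant is only chosen when -mavx2 is in effect.

    #include <immintrin.h>

    /* Hypothetical example: insert an aligned 128-bit integer load into
     * the upper lane of a 256-bit accumulator.  With these patterns the
     * load operand can be folded into VINSERTF128rm / VINSERTI128rm.   */
    __m256i insert_high(__m256i acc, const __m128i *p) {
      __m128i hi = _mm_load_si128(p);               /* aligned 16-byte load */
      return _mm256_insertf128_si256(acc, hi, 1);   /* insert into lane 1   */
    }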





More information about the llvm-commits mailing list