[llvm-commits] [llvm] r63195 - in /llvm/trunk: lib/Target/X86/README-SSE.txt lib/Target/X86/X86InstrSSE.td test/CodeGen/X86/swizzle.ll

Evan Cheng evan.cheng at apple.com
Wed Jan 28 00:35:06 PST 2009


Author: evancheng
Date: Wed Jan 28 02:35:02 2009
New Revision: 63195

URL: http://llvm.org/viewvc/llvm-project?rev=63195&view=rev
Log:
The memory alignment requirement on some of the mov{h|l}p{d|s} patterns is 16 bytes. That is overly strict: these instructions read and write f64 memory locations and impose no alignment requirement.
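
A sketch of the practical effect on the swizzle test added below (registers
and addresses are illustrative, and the exact pre-patch sequence may differ):

	# before: the 4-byte-aligned vector accesses could not match the
	# memop patterns (which demand 16-byte alignment), so codegen
	# fell back to unaligned full-width moves
	movsd	(%eax), %xmm0
	movups	(%ecx), %xmm1
	movsd	%xmm0, %xmm1		# merge low 64 bits
	movups	%xmm1, (%ecx)

	# after: the f64-sized pieces fold directly into movsd / movlps,
	# which carry no alignment requirement
	movsd	(%eax), %xmm0
	movlps	%xmm0, (%ecx)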

Added:
    llvm/trunk/test/CodeGen/X86/swizzle.ll
Modified:
    llvm/trunk/lib/Target/X86/README-SSE.txt
    llvm/trunk/lib/Target/X86/X86InstrSSE.td

Modified: llvm/trunk/lib/Target/X86/README-SSE.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/README-SSE.txt?rev=63195&r1=63194&r2=63195&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/README-SSE.txt (original)
+++ llvm/trunk/lib/Target/X86/README-SSE.txt Wed Jan 28 02:35:02 2009
@@ -907,3 +907,8 @@
   cvtsi2ss 8($esp), %xmm0
 since we know the stack slot is already zext'd.
 
+//===---------------------------------------------------------------------===//
+
+Consider using movlps instead of movsd to implement (scalar_to_vector (loadf64))
+when code size is critical. movlps is slower than movsd on core2 but it's one
+byte shorter.

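For reference, the byte saved is the mandatory F2 prefix on movsd; a sketch
of the two m64 load forms (register and address chosen arbitrarily):

	movsd	(%eax), %xmm0		# F2 0F 10 00  (4 bytes)
	movlps	(%eax), %xmm0		# 0F 12 00     (3 bytes)
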
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=63195&r1=63194&r2=63195&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Wed Jan 28 02:35:02 2009
@@ -3019,62 +3019,60 @@
 let AddedComplexity = 20 in {
 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
 // vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
-def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memop addr:$src2),
+def : Pat<(v4f32 (vector_shuffle VR128:$src1, (load addr:$src2),
                   MOVLP_shuffle_mask)),
           (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
-def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memop addr:$src2),
+def : Pat<(v2f64 (vector_shuffle VR128:$src1, (load addr:$src2),
                   MOVLP_shuffle_mask)),
           (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
-def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memop addr:$src2),
+def : Pat<(v4f32 (vector_shuffle VR128:$src1, (load addr:$src2),
                   MOVHP_shuffle_mask)),
           (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
-def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memop addr:$src2),
+def : Pat<(v2f64 (vector_shuffle VR128:$src1, (load addr:$src2),
                   MOVHP_shuffle_mask)),
           (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
 
-def : Pat<(v4i32 (vector_shuffle VR128:$src1,
-                                 (bc_v4i32 (memopv2i64 addr:$src2)),
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, (load addr:$src2),
                   MOVLP_shuffle_mask)),
           (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
-def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memop addr:$src2),
+def : Pat<(v2i64 (vector_shuffle VR128:$src1, (load addr:$src2),
                   MOVLP_shuffle_mask)),
           (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
-def : Pat<(v4i32 (vector_shuffle VR128:$src1,
-                                 (bc_v4i32 (memopv2i64 addr:$src2)),
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, (load addr:$src2),
                   MOVHP_shuffle_mask)),
           (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
-def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memop addr:$src2),
+def : Pat<(v2i64 (vector_shuffle VR128:$src1, (load addr:$src2),
                   MOVHP_shuffle_mask)),
           (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
 }
 
 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
 // (store (vector_shuffle (load addr), v2, <0, 1, 4, 5>), addr) using MOVHPS
-def : Pat<(store (v4f32 (vector_shuffle (memop addr:$src1), VR128:$src2,
+def : Pat<(store (v4f32 (vector_shuffle (load addr:$src1), VR128:$src2,
                          MOVLP_shuffle_mask)), addr:$src1),
           (MOVLPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
-def : Pat<(store (v2f64 (vector_shuffle (memop addr:$src1), VR128:$src2,
+def : Pat<(store (v2f64 (vector_shuffle (load addr:$src1), VR128:$src2,
                          MOVLP_shuffle_mask)), addr:$src1),
           (MOVLPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
-def : Pat<(store (v4f32 (vector_shuffle (memop addr:$src1), VR128:$src2,
+def : Pat<(store (v4f32 (vector_shuffle (load addr:$src1), VR128:$src2,
                          MOVHP_shuffle_mask)), addr:$src1),
           (MOVHPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
-def : Pat<(store (v2f64 (vector_shuffle (memop addr:$src1), VR128:$src2,
+def : Pat<(store (v2f64 (vector_shuffle (load addr:$src1), VR128:$src2,
                          MOVHP_shuffle_mask)), addr:$src1),
           (MOVHPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
 
 def : Pat<(store (v4i32 (vector_shuffle
-                         (bc_v4i32 (memopv2i64 addr:$src1)), VR128:$src2,
+                         (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2,
                          MOVLP_shuffle_mask)), addr:$src1),
           (MOVLPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
-def : Pat<(store (v2i64 (vector_shuffle (memop addr:$src1), VR128:$src2,
+def : Pat<(store (v2i64 (vector_shuffle (load addr:$src1), VR128:$src2,
                          MOVLP_shuffle_mask)), addr:$src1),
           (MOVLPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
 def : Pat<(store (v4i32 (vector_shuffle
-                         (bc_v4i32 (memopv2i64 addr:$src1)), VR128:$src2,
+                         (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2,
                          MOVHP_shuffle_mask)), addr:$src1),
           (MOVHPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
-def : Pat<(store (v2i64 (vector_shuffle (memop addr:$src1), VR128:$src2,
+def : Pat<(store (v2i64 (vector_shuffle (load addr:$src1), VR128:$src2,
                          MOVHP_shuffle_mask)), addr:$src1),
           (MOVHPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
 

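The change above boils down to swapping the memop PatFrag for plain load.
memop is defined in this file roughly as follows (a sketch; see
X86InstrSSE.td for the exact predicate):

	// 'memop' only matches loads that are at least 16-byte aligned;
	// plain 'load' matches any alignment.
	def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
	  return cast<LoadSDNode>(N)->getAlignment() >= 16;
	}]>;

Since mov{h|l}p{d|s} only touch 64 bits of memory, the 16-byte constraint
bought nothing and merely blocked folding for 4- and 8-byte-aligned
accesses, as in the test below.
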
Added: llvm/trunk/test/CodeGen/X86/swizzle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/swizzle.ll?rev=63195&view=auto

==============================================================================
--- llvm/trunk/test/CodeGen/X86/swizzle.ll (added)
+++ llvm/trunk/test/CodeGen/X86/swizzle.ll Wed Jan 28 02:35:02 2009
@@ -0,0 +1,19 @@
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep movlps
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep movsd
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | not grep movups
+; rdar://6523650
+
+	%struct.vector4_t = type { <4 x float> }
+
+define void @swizzle(i8* nocapture %a, %struct.vector4_t* nocapture %b, %struct.vector4_t* nocapture %c) nounwind {
+entry:
+	%0 = getelementptr %struct.vector4_t* %b, i32 0, i32 0		; <<4 x float>*> [#uses=2]
+	%1 = load <4 x float>* %0, align 4		; <<4 x float>> [#uses=1]
+	%tmp.i = bitcast i8* %a to double*		; <double*> [#uses=1]
+	%tmp1.i = load double* %tmp.i		; <double> [#uses=1]
+	%2 = insertelement <2 x double> undef, double %tmp1.i, i32 0		; <<2 x double>> [#uses=1]
+	%tmp2.i = bitcast <2 x double> %2 to <4 x float>		; <<4 x float>> [#uses=1]
+	%3 = shufflevector <4 x float> %1, <4 x float> %tmp2.i, <4 x i32> < i32 4, i32 5, i32 2, i32 3 >		; <<4 x float>> [#uses=1]
+	store <4 x float> %3, <4 x float>* %0, align 4
+	ret void
+}

More information about the llvm-commits mailing list