<div dir="ltr"><div><div>Hi Chandler, <br><br>This change adds code size (1-2 bytes per blend instruction) and doesn't improve performance for chips other than Sandybridge and Haswell AFAICT, but it was enabled for all conditions and targets.<br><br></div>Do you have perf measurements that show the win on SB or Haswell?<br><br></div>I stumbled over these patterns while fixing PR23073 ( <a href="https://llvm.org/bugs/show_bug.cgi?id=23073">https://llvm.org/bugs/show_bug.cgi?id=23073</a> ), and my initial reaction was that these patterns were a mistake. More details in:<br><a href="http://reviews.llvm.org/D8794">http://reviews.llvm.org/D8794</a><br><br><br></div><div class="gmail_extra"><br><div class="gmail_quote">On Fri, Oct 3, 2014 at 3:38 PM, Chandler Carruth <span dir="ltr"><<a href="mailto:chandlerc@gmail.com" target="_blank">chandlerc@gmail.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Author: chandlerc<br>
<div class="gmail_extra"><br><div class="gmail_quote">On Fri, Oct 3, 2014 at 3:38 PM, Chandler Carruth <span dir="ltr"><<a href="mailto:chandlerc@gmail.com" target="_blank">chandlerc@gmail.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Author: chandlerc<br>
Date: Fri Oct 3 16:38:49 2014<br>
New Revision: 219022<br>
<br>
URL: <a href="http://llvm.org/viewvc/llvm-project?rev=219022&view=rev" target="_blank">http://llvm.org/viewvc/llvm-project?rev=219022&view=rev</a><br>
Log:<br>
[x86] Adjust the patterns for lowering X86vzmovl nodes which don't<br>
perform a load to use blendps rather than movss when it is available.<br>
<br>
For non-loads, blendps is *much* faster. It can execute on two ports in<br>
Sandy Bridge and Ivy Bridge, and *three* ports on Haswell. This fixes<br>
one of the "regressions" from aggressively taking the "insertion" path<br>
in the new vector shuffle lowering.<br>
<br>
This does highlight one problem with blendps -- it isn't commuted as<br>
heavily as it should be. That's future work though.<br>
<br>
Modified:<br>
llvm/trunk/lib/Target/X86/X86InstrInfo.td<br>
llvm/trunk/lib/Target/X86/X86InstrSSE.td<br>
llvm/trunk/test/CodeGen/X86/combine-or.ll<br>
llvm/trunk/test/CodeGen/X86/sse41.ll<br>
llvm/trunk/test/CodeGen/X86/vec_set-3.ll<br>
llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll<br>
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll<br>
<br>
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=219022&r1=219021&r2=219022&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=219022&r1=219021&r2=219022&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)<br>
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Fri Oct 3 16:38:49 2014<br>
@@ -693,6 +693,7 @@ def UseSSE3 : Predicate<"Subtarget-<br>
def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;<br>
def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;<br>
def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;<br>
+def NoSSE41 : Predicate<"!Subtarget->hasSSE41()">;<br>
def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;<br>
def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;<br>
def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;<br>
<br>
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=219022&r1=219021&r2=219022&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=219022&r1=219021&r2=219022&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)<br>
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Fri Oct 3 16:38:49 2014<br>
@@ -612,29 +612,6 @@ let canFoldAsLoad = 1, isReMaterializabl<br>
<br>
// Patterns<br>
let Predicates = [UseAVX] in {<br>
- let AddedComplexity = 15 in {<br>
- // Move scalar to XMM zero-extended, zeroing a VR128 then do a<br>
- // MOVS{S,D} to the lower bits.<br>
- def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),<br>
- (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;<br>
- def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),<br>
- (VMOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;<br>
- def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),<br>
- (VMOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;<br>
- def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),<br>
- (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;<br>
-<br>
- // Move low f32 and clear high bits.<br>
- def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),<br>
- (SUBREG_TO_REG (i32 0),<br>
- (VMOVSSrr (v4f32 (V_SET0)),<br>
- (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)), sub_xmm)>;<br>
- def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),<br>
- (SUBREG_TO_REG (i32 0),<br>
- (VMOVSSrr (v4i32 (V_SET0)),<br>
- (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)), sub_xmm)>;<br>
- }<br>
-<br>
let AddedComplexity = 20 in {<br>
// MOVSSrm zeros the high parts of the register; represent this<br>
// with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0<br>
@@ -670,31 +647,10 @@ let Predicates = [UseAVX] in {<br>
(v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),<br>
(SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;<br>
}<br>
- def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,<br>
- (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),<br>
- (SUBREG_TO_REG (i32 0),<br>
- (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),<br>
- sub_xmm)>;<br>
- def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,<br>
- (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),<br>
- (SUBREG_TO_REG (i64 0),<br>
- (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),<br>
- sub_xmm)>;<br>
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,<br>
(v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),<br>
(SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>;<br>
<br>
- // Move low f64 and clear high bits.<br>
- def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),<br>
- (SUBREG_TO_REG (i32 0),<br>
- (VMOVSDrr (v2f64 (V_SET0)),<br>
- (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)), sub_xmm)>;<br>
-<br>
- def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),<br>
- (SUBREG_TO_REG (i32 0),<br>
- (VMOVSDrr (v2i64 (V_SET0)),<br>
- (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)), sub_xmm)>;<br>
-<br>
// Extract and store.<br>
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),<br>
addr:$dst),<br>
@@ -745,7 +701,6 @@ let Predicates = [UseAVX] in {<br>
(EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),<br>
sub_xmm)>;<br>
<br>
-<br>
// FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem<br>
// is during lowering, where it's not possible to recognize the fold cause<br>
// it has two uses through a bitcast. One use disappears at isel time and the<br>
@@ -761,7 +716,7 @@ let Predicates = [UseAVX] in {<br>
}<br>
<br>
let Predicates = [UseSSE1] in {<br>
- let AddedComplexity = 15 in {<br>
+ let Predicates = [NoSSE41], AddedComplexity = 15 in {<br>
// Move scalar to XMM zero-extended, zeroing a VR128 then do a<br>
// MOVSS to the lower bits.<br>
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),<br>
@@ -795,7 +750,7 @@ let Predicates = [UseSSE1] in {<br>
}<br>
<br>
let Predicates = [UseSSE2] in {<br>
- let AddedComplexity = 15 in {<br>
+ let Predicates = [NoSSE41], AddedComplexity = 15 in {<br>
// Move scalar to XMM zero-extended, zeroing a VR128 then do a<br>
// MOVSD to the lower bits.<br>
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),<br>
@@ -7576,6 +7531,57 @@ let Predicates = [HasAVX2] in {<br>
(VPBLENDWYrri VR256:$src1, VR256:$src2, imm:$mask)>;<br>
}<br>
<br>
+// Patterns<br>
+let Predicates = [UseAVX] in {<br>
+ let AddedComplexity = 15 in {<br>
+ // Move scalar to XMM zero-extended, zeroing a VR128 then do a<br>
+ // MOVS{S,D} to the lower bits.<br>
+ def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),<br>
+ (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;<br>
+ def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),<br>
+ (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;<br>
+ def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),<br>
+ (VBLENDPSrri (v4i32 (V_SET0)), VR128:$src, (i8 1))>;<br>
+ def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),<br>
+ (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;<br>
+<br>
+ // Move low f32 and clear high bits.<br>
+ def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),<br>
+ (VBLENDPSYrri (v8f32 (AVX_SET0)), VR256:$src, (i8 1))>;<br>
+ def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),<br>
+ (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;<br>
+ }<br>
+<br>
+ def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,<br>
+ (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),<br>
+ (SUBREG_TO_REG (i32 0),<br>
+ (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),<br>
+ sub_xmm)>;<br>
+ def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,<br>
+ (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),<br>
+ (SUBREG_TO_REG (i64 0),<br>
+ (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),<br>
+ sub_xmm)>;<br>
+<br>
+ // Move low f64 and clear high bits.<br>
+ def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),<br>
+ (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;<br>
+<br>
+ def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),<br>
+ (VBLENDPDYrri (v4i64 (AVX_SET0)), VR256:$src, (i8 1))>;<br>
+}<br>
+<br>
+let Predicates = [UseSSE41] in {<br>
+ // With SSE41 we can use blends for these patterns.<br>
+ def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),<br>
+ (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;<br>
+ def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),<br>
+ (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;<br>
+ def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),<br>
+ (BLENDPDrri (v2f64 (V_SET0)), VR128:$src, (i8 1))>;<br>
+}<br>
+<br>
+<br>
/// SS41I_ternary_int - SSE 4.1 ternary operator<br>
let Uses = [XMM0], Constraints = "$src1 = $dst" in {<br>
multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,<br>
<br>
Modified: llvm/trunk/test/CodeGen/X86/combine-or.ll<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-or.ll?rev=219022&r1=219021&r2=219022&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-or.ll?rev=219022&r1=219021&r2=219022&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/test/CodeGen/X86/combine-or.ll (original)<br>
+++ llvm/trunk/test/CodeGen/X86/combine-or.ll Fri Oct 3 16:38:49 2014<br>
@@ -228,9 +228,9 @@ define <4 x i32> @test18(<4 x i32> %a, <<br>
; CHECK: # BB#0:<br>
; CHECK-NEXT: xorps %xmm2, %xmm2<br>
; CHECK-NEXT: xorps %xmm3, %xmm3<br>
-; CHECK-NEXT: movss %xmm0, %xmm3<br>
+; CHECK-NEXT: blendps $1, %xmm0, %xmm3<br>
; CHECK-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[0,0]<br>
-; CHECK-NEXT: movss %xmm1, %xmm2<br>
+; CHECK-NEXT: blendps $1, %xmm1, %xmm2<br>
; CHECK-NEXT: orps %xmm3, %xmm2<br>
; CHECK-NEXT: movaps %xmm2, %xmm0<br>
; CHECK-NEXT: retq<br>
<br>
Modified: llvm/trunk/test/CodeGen/X86/sse41.ll<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41.ll?rev=219022&r1=219021&r2=219022&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41.ll?rev=219022&r1=219021&r2=219022&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/test/CodeGen/X86/sse41.ll (original)<br>
+++ llvm/trunk/test/CodeGen/X86/sse41.ll Fri Oct 3 16:38:49 2014<br>
@@ -522,7 +522,7 @@ define <4 x float> @shuf_X00A(<4 x float<br>
; X32-LABEL: shuf_X00A:<br>
; X32: ## BB#0:<br>
; X32-NEXT: xorps %xmm2, %xmm2<br>
-; X32-NEXT: movss %xmm0, %xmm2<br>
+; X32-NEXT: blendps $1, %xmm0, %xmm2<br>
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]<br>
; X32-NEXT: movaps %xmm2, %xmm0<br>
; X32-NEXT: retl<br>
@@ -530,7 +530,7 @@ define <4 x float> @shuf_X00A(<4 x float<br>
; X64-LABEL: shuf_X00A:<br>
; X64: ## BB#0:<br>
; X64-NEXT: xorps %xmm2, %xmm2<br>
-; X64-NEXT: movss %xmm0, %xmm2<br>
+; X64-NEXT: blendps $1, %xmm0, %xmm2<br>
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]<br>
; X64-NEXT: movaps %xmm2, %xmm0<br>
; X64-NEXT: retq<br>
@@ -546,7 +546,7 @@ define <4 x float> @shuf_X00X(<4 x float<br>
; X32-LABEL: shuf_X00X:<br>
; X32: ## BB#0:<br>
; X32-NEXT: xorps %xmm1, %xmm1<br>
-; X32-NEXT: movss %xmm0, %xmm1<br>
+; X32-NEXT: blendps $1, %xmm0, %xmm1<br>
; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]<br>
; X32-NEXT: movaps %xmm1, %xmm0<br>
; X32-NEXT: retl<br>
@@ -554,7 +554,7 @@ define <4 x float> @shuf_X00X(<4 x float<br>
; X64-LABEL: shuf_X00X:<br>
; X64: ## BB#0:<br>
; X64-NEXT: xorps %xmm1, %xmm1<br>
-; X64-NEXT: movss %xmm0, %xmm1<br>
+; X64-NEXT: blendps $1, %xmm0, %xmm1<br>
; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]<br>
; X64-NEXT: movaps %xmm1, %xmm0<br>
; X64-NEXT: retq<br>
@@ -570,7 +570,7 @@ define <4 x float> @shuf_X0YC(<4 x float<br>
; X32-LABEL: shuf_X0YC:<br>
; X32: ## BB#0:<br>
; X32-NEXT: xorps %xmm2, %xmm2<br>
-; X32-NEXT: movss %xmm0, %xmm2<br>
+; X32-NEXT: blendps $1, %xmm0, %xmm2<br>
; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]<br>
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]<br>
; X32-NEXT: movaps %xmm2, %xmm0<br>
@@ -579,7 +579,7 @@ define <4 x float> @shuf_X0YC(<4 x float<br>
; X64-LABEL: shuf_X0YC:<br>
; X64: ## BB#0:<br>
; X64-NEXT: xorps %xmm2, %xmm2<br>
-; X64-NEXT: movss %xmm0, %xmm2<br>
+; X64-NEXT: blendps $1, %xmm0, %xmm2<br>
; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]<br>
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]<br>
; X64-NEXT: movaps %xmm2, %xmm0<br>
@@ -692,7 +692,7 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32<br>
; X32-LABEL: i32_shuf_X00A:<br>
; X32: ## BB#0:<br>
; X32-NEXT: xorps %xmm2, %xmm2<br>
-; X32-NEXT: movss %xmm0, %xmm2<br>
+; X32-NEXT: blendps $1, %xmm0, %xmm2<br>
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]<br>
; X32-NEXT: movaps %xmm2, %xmm0<br>
; X32-NEXT: retl<br>
@@ -700,7 +700,7 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32<br>
; X64-LABEL: i32_shuf_X00A:<br>
; X64: ## BB#0:<br>
; X64-NEXT: xorps %xmm2, %xmm2<br>
-; X64-NEXT: movss %xmm0, %xmm2<br>
+; X64-NEXT: blendps $1, %xmm0, %xmm2<br>
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]<br>
; X64-NEXT: movaps %xmm2, %xmm0<br>
; X64-NEXT: retq<br>
@@ -716,7 +716,7 @@ define <4 x i32> @i32_shuf_X00X(<4 x i32<br>
; X32-LABEL: i32_shuf_X00X:<br>
; X32: ## BB#0:<br>
; X32-NEXT: xorps %xmm1, %xmm1<br>
-; X32-NEXT: movss %xmm0, %xmm1<br>
+; X32-NEXT: blendps $1, %xmm0, %xmm1<br>
; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]<br>
; X32-NEXT: movaps %xmm1, %xmm0<br>
; X32-NEXT: retl<br>
@@ -724,7 +724,7 @@ define <4 x i32> @i32_shuf_X00X(<4 x i32<br>
; X64-LABEL: i32_shuf_X00X:<br>
; X64: ## BB#0:<br>
; X64-NEXT: xorps %xmm1, %xmm1<br>
-; X64-NEXT: movss %xmm0, %xmm1<br>
+; X64-NEXT: blendps $1, %xmm0, %xmm1<br>
; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]<br>
; X64-NEXT: movaps %xmm1, %xmm0<br>
; X64-NEXT: retq<br>
@@ -740,7 +740,7 @@ define <4 x i32> @i32_shuf_X0YC(<4 x i32<br>
; X32-LABEL: i32_shuf_X0YC:<br>
; X32: ## BB#0:<br>
; X32-NEXT: xorps %xmm2, %xmm2<br>
-; X32-NEXT: movss %xmm0, %xmm2<br>
+; X32-NEXT: blendps $1, %xmm0, %xmm2<br>
; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]<br>
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]<br>
; X32-NEXT: movaps %xmm2, %xmm0<br>
@@ -749,7 +749,7 @@ define <4 x i32> @i32_shuf_X0YC(<4 x i32<br>
; X64-LABEL: i32_shuf_X0YC:<br>
; X64: ## BB#0:<br>
; X64-NEXT: xorps %xmm2, %xmm2<br>
-; X64-NEXT: movss %xmm0, %xmm2<br>
+; X64-NEXT: blendps $1, %xmm0, %xmm2<br>
; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]<br>
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]<br>
; X64-NEXT: movaps %xmm2, %xmm0<br>
<br>
Modified: llvm/trunk/test/CodeGen/X86/vec_set-3.ll<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-3.ll?rev=219022&r1=219021&r2=219022&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_set-3.ll?rev=219022&r1=219021&r2=219022&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/test/CodeGen/X86/vec_set-3.ll (original)<br>
+++ llvm/trunk/test/CodeGen/X86/vec_set-3.ll Fri Oct 3 16:38:49 2014<br>
@@ -39,7 +39,7 @@ entry:<br>
define <4 x float> @test3(<4 x float> %A) {<br>
; CHECK-LABEL: test3:<br>
; CHECK: xorps %[[X1:xmm[0-9]+]], %[[X1]]<br>
-; CHECK-NEXT: movss %xmm0, %[[X1]]<br>
+; CHECK-NEXT: blendps $1, %xmm0, %[[X1]]<br>
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = [[X1]][1,0,1,1]<br>
; CHECK-NEXT: retl<br>
;<br>
<br>
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll?rev=219022&r1=219021&r2=219022&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll?rev=219022&r1=219021&r2=219022&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll (original)<br>
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll Fri Oct 3 16:38:49 2014<br>
@@ -438,17 +438,38 @@ define <4 x i32> @shuffle_v4i32_4015(<4<br>
}<br>
<br>
define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {<br>
-; SSE-LABEL: shuffle_v4f32_4zzz:<br>
-; SSE: # BB#0:<br>
-; SSE-NEXT: xorps %xmm1, %xmm1<br>
-; SSE-NEXT: movss %xmm0, %xmm1<br>
-; SSE-NEXT: movaps %xmm1, %xmm0<br>
-; SSE-NEXT: retq<br>
+; SSE2-LABEL: shuffle_v4f32_4zzz:<br>
+; SSE2: # BB#0:<br>
+; SSE2-NEXT: xorps %xmm1, %xmm1<br>
+; SSE2-NEXT: movss %xmm0, %xmm1<br>
+; SSE2-NEXT: movaps %xmm1, %xmm0<br>
+; SSE2-NEXT: retq<br>
+;<br>
+; SSE3-LABEL: shuffle_v4f32_4zzz:<br>
+; SSE3: # BB#0:<br>
+; SSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSE3-NEXT: movaps %xmm1, %xmm0<br>
+; SSE3-NEXT: retq<br>
+;<br>
+; SSSE3-LABEL: shuffle_v4f32_4zzz:<br>
+; SSSE3: # BB#0:<br>
+; SSSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSSE3-NEXT: movaps %xmm1, %xmm0<br>
+; SSSE3-NEXT: retq<br>
+;<br>
+; SSE41-LABEL: shuffle_v4f32_4zzz:<br>
+; SSE41: # BB#0:<br>
+; SSE41-NEXT: xorps %xmm1, %xmm1<br>
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]<br>
+; SSE41-NEXT: movaps %xmm1, %xmm0<br>
+; SSE41-NEXT: retq<br>
;<br>
; AVX-LABEL: shuffle_v4f32_4zzz:<br>
; AVX: # BB#0:<br>
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1<br>
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0<br>
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]<br>
; AVX-NEXT: retq<br>
%shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3><br>
ret <4 x float> %shuffle<br>
@@ -639,34 +660,76 @@ define <4 x float> @shuffle_v4f32_z6zz(<<br>
}<br>
<br>
define <4 x i32> @shuffle_v4i32_4zzz(<4 x i32> %a) {<br>
-; SSE-LABEL: shuffle_v4i32_4zzz:<br>
-; SSE: # BB#0:<br>
-; SSE-NEXT: xorps %xmm1, %xmm1<br>
-; SSE-NEXT: movss %xmm0, %xmm1<br>
-; SSE-NEXT: movaps %xmm1, %xmm0<br>
-; SSE-NEXT: retq<br>
+; SSE2-LABEL: shuffle_v4i32_4zzz:<br>
+; SSE2: # BB#0:<br>
+; SSE2-NEXT: xorps %xmm1, %xmm1<br>
+; SSE2-NEXT: movss %xmm0, %xmm1<br>
+; SSE2-NEXT: movaps %xmm1, %xmm0<br>
+; SSE2-NEXT: retq<br>
+;<br>
+; SSE3-LABEL: shuffle_v4i32_4zzz:<br>
+; SSE3: # BB#0:<br>
+; SSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSE3-NEXT: movaps %xmm1, %xmm0<br>
+; SSE3-NEXT: retq<br>
+;<br>
+; SSSE3-LABEL: shuffle_v4i32_4zzz:<br>
+; SSSE3: # BB#0:<br>
+; SSSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSSE3-NEXT: movaps %xmm1, %xmm0<br>
+; SSSE3-NEXT: retq<br>
+;<br>
+; SSE41-LABEL: shuffle_v4i32_4zzz:<br>
+; SSE41: # BB#0:<br>
+; SSE41-NEXT: xorps %xmm1, %xmm1<br>
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]<br>
+; SSE41-NEXT: movaps %xmm1, %xmm0<br>
+; SSE41-NEXT: retq<br>
;<br>
; AVX-LABEL: shuffle_v4i32_4zzz:<br>
; AVX: # BB#0:<br>
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1<br>
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0<br>
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]<br>
; AVX-NEXT: retq<br>
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3><br>
ret <4 x i32> %shuffle<br>
}<br>
<br>
define <4 x i32> @shuffle_v4i32_z4zz(<4 x i32> %a) {<br>
-; SSE-LABEL: shuffle_v4i32_z4zz:<br>
-; SSE: # BB#0:<br>
-; SSE-NEXT: xorps %xmm1, %xmm1<br>
-; SSE-NEXT: movss %xmm0, %xmm1<br>
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]<br>
-; SSE-NEXT: retq<br>
+; SSE2-LABEL: shuffle_v4i32_z4zz:<br>
+; SSE2: # BB#0:<br>
+; SSE2-NEXT: xorps %xmm1, %xmm1<br>
+; SSE2-NEXT: movss %xmm0, %xmm1<br>
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]<br>
+; SSE2-NEXT: retq<br>
+;<br>
+; SSE3-LABEL: shuffle_v4i32_z4zz:<br>
+; SSE3: # BB#0:<br>
+; SSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]<br>
+; SSE3-NEXT: retq<br>
+;<br>
+; SSSE3-LABEL: shuffle_v4i32_z4zz:<br>
+; SSSE3: # BB#0:<br>
+; SSSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]<br>
+; SSSE3-NEXT: retq<br>
+;<br>
+; SSE41-LABEL: shuffle_v4i32_z4zz:<br>
+; SSE41: # BB#0:<br>
+; SSE41-NEXT: xorps %xmm1, %xmm1<br>
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]<br>
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]<br>
+; SSE41-NEXT: retq<br>
;<br>
; AVX-LABEL: shuffle_v4i32_z4zz:<br>
; AVX: # BB#0:<br>
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1<br>
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0<br>
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]<br>
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]<br>
; AVX-NEXT: retq<br>
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 2, i32 4, i32 3, i32 0><br>
@@ -674,17 +737,38 @@ define <4 x i32> @shuffle_v4i32_z4zz(<4<br>
}<br>
<br>
define <4 x i32> @shuffle_v4i32_zz4z(<4 x i32> %a) {<br>
-; SSE-LABEL: shuffle_v4i32_zz4z:<br>
-; SSE: # BB#0:<br>
-; SSE-NEXT: xorps %xmm1, %xmm1<br>
-; SSE-NEXT: movss %xmm0, %xmm1<br>
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]<br>
-; SSE-NEXT: retq<br>
+; SSE2-LABEL: shuffle_v4i32_zz4z:<br>
+; SSE2: # BB#0:<br>
+; SSE2-NEXT: xorps %xmm1, %xmm1<br>
+; SSE2-NEXT: movss %xmm0, %xmm1<br>
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]<br>
+; SSE2-NEXT: retq<br>
+;<br>
+; SSE3-LABEL: shuffle_v4i32_zz4z:<br>
+; SSE3: # BB#0:<br>
+; SSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]<br>
+; SSE3-NEXT: retq<br>
+;<br>
+; SSSE3-LABEL: shuffle_v4i32_zz4z:<br>
+; SSSE3: # BB#0:<br>
+; SSSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]<br>
+; SSSE3-NEXT: retq<br>
+;<br>
+; SSE41-LABEL: shuffle_v4i32_zz4z:<br>
+; SSE41: # BB#0:<br>
+; SSE41-NEXT: xorps %xmm1, %xmm1<br>
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]<br>
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]<br>
+; SSE41-NEXT: retq<br>
;<br>
; AVX-LABEL: shuffle_v4i32_zz4z:<br>
; AVX: # BB#0:<br>
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1<br>
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0<br>
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]<br>
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]<br>
; AVX-NEXT: retq<br>
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 0, i32 0, i32 4, i32 0><br>
@@ -692,17 +776,38 @@ define <4 x i32> @shuffle_v4i32_zz4z(<4<br>
}<br>
<br>
define <4 x i32> @shuffle_v4i32_zuu4(<4 x i32> %a) {<br>
-; SSE-LABEL: shuffle_v4i32_zuu4:<br>
-; SSE: # BB#0:<br>
-; SSE-NEXT: xorps %xmm1, %xmm1<br>
-; SSE-NEXT: movss %xmm0, %xmm1<br>
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]<br>
-; SSE-NEXT: retq<br>
+; SSE2-LABEL: shuffle_v4i32_zuu4:<br>
+; SSE2: # BB#0:<br>
+; SSE2-NEXT: xorps %xmm1, %xmm1<br>
+; SSE2-NEXT: movss %xmm0, %xmm1<br>
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]<br>
+; SSE2-NEXT: retq<br>
+;<br>
+; SSE3-LABEL: shuffle_v4i32_zuu4:<br>
+; SSE3: # BB#0:<br>
+; SSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]<br>
+; SSE3-NEXT: retq<br>
+;<br>
+; SSSE3-LABEL: shuffle_v4i32_zuu4:<br>
+; SSSE3: # BB#0:<br>
+; SSSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]<br>
+; SSSE3-NEXT: retq<br>
+;<br>
+; SSE41-LABEL: shuffle_v4i32_zuu4:<br>
+; SSE41: # BB#0:<br>
+; SSE41-NEXT: xorps %xmm1, %xmm1<br>
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]<br>
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]<br>
+; SSE41-NEXT: retq<br>
;<br>
; AVX-LABEL: shuffle_v4i32_zuu4:<br>
; AVX: # BB#0:<br>
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1<br>
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0<br>
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]<br>
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,0]<br>
; AVX-NEXT: retq<br>
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 0, i32 undef, i32 undef, i32 4><br>
@@ -1031,12 +1136,33 @@ define <4 x i32> @insert_mem_and_zero_v4<br>
}<br>
<br>
define <4 x float> @insert_reg_and_zero_v4f32(float %a) {<br>
-; SSE-LABEL: insert_reg_and_zero_v4f32:<br>
-; SSE: # BB#0:<br>
-; SSE-NEXT: xorps %xmm1, %xmm1<br>
-; SSE-NEXT: movss %xmm0, %xmm1<br>
-; SSE-NEXT: movaps %xmm1, %xmm0<br>
-; SSE-NEXT: retq<br>
+; SSE2-LABEL: insert_reg_and_zero_v4f32:<br>
+; SSE2: # BB#0:<br>
+; SSE2-NEXT: xorps %xmm1, %xmm1<br>
+; SSE2-NEXT: movss %xmm0, %xmm1<br>
+; SSE2-NEXT: movaps %xmm1, %xmm0<br>
+; SSE2-NEXT: retq<br>
+;<br>
+; SSE3-LABEL: insert_reg_and_zero_v4f32:<br>
+; SSE3: # BB#0:<br>
+; SSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSE3-NEXT: movaps %xmm1, %xmm0<br>
+; SSE3-NEXT: retq<br>
+;<br>
+; SSSE3-LABEL: insert_reg_and_zero_v4f32:<br>
+; SSSE3: # BB#0:<br>
+; SSSE3-NEXT: xorps %xmm1, %xmm1<br>
+; SSSE3-NEXT: movss %xmm0, %xmm1<br>
+; SSSE3-NEXT: movaps %xmm1, %xmm0<br>
+; SSSE3-NEXT: retq<br>
+;<br>
+; SSE41-LABEL: insert_reg_and_zero_v4f32:<br>
+; SSE41: # BB#0:<br>
+; SSE41-NEXT: xorps %xmm1, %xmm1<br>
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]<br>
+; SSE41-NEXT: movaps %xmm1, %xmm0<br>
+; SSE41-NEXT: retq<br>
;<br>
; AVX-LABEL: insert_reg_and_zero_v4f32:<br>
; AVX: # BB#0:<br>
<br>
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=219022&r1=219021&r2=219022&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=219022&r1=219021&r2=219022&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)<br>
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Fri Oct 3 16:38:49 2014<br>
@@ -678,8 +678,8 @@ define <4 x i64> @insert_reg_and_zero_v4<br>
; AVX1-LABEL: insert_reg_and_zero_v4i64:<br>
; AVX1: # BB#0:<br>
; AVX1-NEXT: vmovq %rdi, %xmm0<br>
-; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1<br>
-; AVX1-NEXT: vmovsd %xmm0, %xmm1, %xmm0<br>
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1<br>
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]<br>
; AVX1-NEXT: retq<br>
;<br>
; AVX2-LABEL: insert_reg_and_zero_v4i64:<br>
@@ -697,8 +697,8 @@ define <4 x i64> @insert_mem_and_zero_v4<br>
; AVX1-LABEL: insert_mem_and_zero_v4i64:<br>
; AVX1: # BB#0:<br>
; AVX1-NEXT: vmovq (%rdi), %xmm0<br>
-; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1<br>
-; AVX1-NEXT: vmovsd %xmm0, %xmm1, %xmm0<br>
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1<br>
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]<br>
; AVX1-NEXT: retq<br>
;<br>
; AVX2-LABEL: insert_mem_and_zero_v4i64:<br>
<br>
<br>
_______________________________________________<br>
llvm-commits mailing list<br>
<a href="mailto:llvm-commits@cs.uiuc.edu">llvm-commits@cs.uiuc.edu</a><br>
<a href="http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits" target="_blank">http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits</a><br>
</blockquote></div><br></div>