[llvm] r337134 - [X86] Use 128-bit ops for 256-bit vzmovl patterns.
Craig Topper via llvm-commits
Sun Jul 15 11:51:07 PDT 2018
Author: ctopper
Date: Sun Jul 15 11:51:07 2018
New Revision: 337134
URL: http://llvm.org/viewvc/llvm-project?rev=337134&view=rev
Log:
[X86] Use 128-bit ops for 256-bit vzmovl patterns.
128-bit ops implicitly zero the upper bits. This should address the removed comment about domain crossing for the integer versions: even without AVX2, a 128-bit VPBLENDW is available, so we can stay in the integer domain.
The only downside I see here is that we fail to reuse a vxorps in some of the tests, but I think that is an already known issue.
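For illustration, a minimal IR sketch (hypothetical function name, modeled on the v4f64 test in vector-shuffle-256-v4.ll) of the kind of shuffle these patterns cover, with the lowering before and after this change quoted from the updated tests in the comments:

; Keep lane 0 of a <4 x double> and zero lanes 1-3 (an X86vzmovl).
define <4 x double> @vzmovl_v4f64(<4 x double> %v) {
  %r = shufflevector <4 x double> %v, <4 x double> zeroinitializer,
                     <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x double> %r
}
; Before: vblendps ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]   (256-bit blend)
; After:  vblendps xmm0 = xmm0[0,1],xmm1[2,3]           (128-bit blend; the
;         VEX-encoded 128-bit op implicitly zeroes the upper 128 bits of ymm0)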
Modified:
llvm/trunk/lib/Target/X86/X86InstrSSE.td
llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll
llvm/trunk/test/CodeGen/X86/avx-load-store.ll
llvm/trunk/test/CodeGen/X86/vec_extract-avx.ll
llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=337134&r1=337133&r2=337134&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Sun Jul 15 11:51:07 2018
@@ -6431,19 +6431,26 @@ let Predicates = [HasAVX, OptForSpeed] i
// Move low f32 and clear high bits.
def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
- (VBLENDPSYrri (v8f32 (AVX_SET0)), VR256:$src, (i8 1))>;
+ (SUBREG_TO_REG (i32 0),
+ (VBLENDPSrri (v4f32 (V_SET0)),
+ (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm),
+ (i8 1)), sub_xmm)>;
+ def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
+ (SUBREG_TO_REG (i32 0),
+ (VPBLENDWrri (v4i32 (V_SET0)),
+ (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm),
+ (i8 3)), sub_xmm)>;
- // Move low f64 and clear high bits.
def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
- (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;
-
- // These will incur an FP/int domain crossing penalty, but it may be the only
- // way without AVX2. Do not add any complexity because we may be able to match
- // more optimal patterns defined earlier in this file.
- def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
- (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;
+ (SUBREG_TO_REG (i32 0),
+ (VBLENDPDrri (v2f64 (V_SET0)),
+ (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm),
+ (i8 1)), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
- (VBLENDPDYrri (v4i64 (AVX_SET0)), VR256:$src, (i8 1))>;
+ (SUBREG_TO_REG (i32 0),
+ (VPBLENDWrri (v2i64 (V_SET0)),
+ (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm),
+ (i8 0xf)), sub_xmm)>;
}
// Prefer a movss or movsd over a blendps when optimizing for size. these were
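The comment deleted above noted that the old v8i32/v4i64 patterns incurred an FP/int domain crossing penalty because there is no 256-bit integer blend without AVX2. The new patterns instead blend in the low 128 bits with the integer-domain VPBLENDW (available with AVX1) and rely on the implicit zeroing of the upper half. As an illustrative sketch (hypothetical function name), a shuffle of the shape that forms a v4i64 X86vzmovl node:

; Keep element 0 of a <4 x i64> and zero elements 1-3.
define <4 x i64> @vzmovl_v4i64(<4 x i64> %v) {
  %r = shufflevector <4 x i64> %v, <4 x i64> zeroinitializer,
                     <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x i64> %r
}

When this reaches isel as a v4i64 X86vzmovl under HasAVX/OptForSpeed, the VPBLENDWrri pattern above selects a vpblendw with immediate 0xf against a zeroed xmm register rather than the old 256-bit vblendpd.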
Modified: llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll?rev=337134&r1=337133&r2=337134&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll Sun Jul 15 11:51:07 2018
@@ -1,16 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+avx -mtriple=i686-pc-win32 | FileCheck %s
define void @endless_loop() {
; CHECK-LABEL: endless_loop:
-; CHECK-NEXT: # %bb.0:
+; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovaps (%eax), %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
-; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4,5,6,7]
+; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5,6],ymm0[7]
; CHECK-NEXT: vmovaps %ymm0, (%eax)
; CHECK-NEXT: vmovaps %ymm1, (%eax)
; CHECK-NEXT: vzeroupper
Modified: llvm/trunk/test/CodeGen/X86/avx-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-load-store.ll?rev=337134&r1=337133&r2=337134&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-load-store.ll Sun Jul 15 11:51:07 2018
@@ -87,8 +87,10 @@ define <8 x float> @mov00(<8 x float> %v
; CHECK_O0-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK_O0-NEXT: # implicit-def: $ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
+; CHECK_O0-NEXT: vmovaps %xmm1, %xmm0
; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK_O0-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3,4,5,6,7]
+; CHECK_O0-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; CHECK_O0-NEXT: # kill: def $ymm0 killed $xmm0
; CHECK_O0-NEXT: retq
%val = load float, float* %ptr
%i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
@@ -106,8 +108,10 @@ define <4 x double> @mov01(<4 x double>
; CHECK_O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK_O0-NEXT: # implicit-def: $ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
+; CHECK_O0-NEXT: vmovaps %xmm1, %xmm0
; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK_O0-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3]
+; CHECK_O0-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm2[1]
+; CHECK_O0-NEXT: # kill: def $ymm0 killed $xmm0
; CHECK_O0-NEXT: retq
%val = load double, double* %ptr
%i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
Modified: llvm/trunk/test/CodeGen/X86/vec_extract-avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_extract-avx.ll?rev=337134&r1=337133&r2=337134&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_extract-avx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_extract-avx.ll Sun Jul 15 11:51:07 2018
@@ -119,7 +119,7 @@ define void @legal_vzmovl_2i32_8i32(<2 x
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32-NEXT: vmovaps %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
@@ -128,7 +128,7 @@ define void @legal_vzmovl_2i32_8i32(<2 x
; X64: # %bb.0:
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: vmovaps %ymm0, (%rsi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -146,7 +146,7 @@ define void @legal_vzmovl_2i64_4i64(<2 x
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovups (%ecx), %xmm0
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; X32-NEXT: vmovaps %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
@@ -155,7 +155,7 @@ define void @legal_vzmovl_2i64_4i64(<2 x
; X64: # %bb.0:
; X64-NEXT: vmovups (%rdi), %xmm0
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; X64-NEXT: vmovaps %ymm0, (%rsi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -180,7 +180,7 @@ define void @legal_vzmovl_2f32_8f32(<2 x
; X64: # %bb.0:
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: vmovaps %ymm0, (%rsi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -198,7 +198,7 @@ define void @legal_vzmovl_2f64_4f64(<2 x
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vmovups (%ecx), %xmm0
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; X32-NEXT: vmovaps %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
@@ -207,7 +207,7 @@ define void @legal_vzmovl_2f64_4f64(<2 x
; X64: # %bb.0:
; X64-NEXT: vmovups (%rdi), %xmm0
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; X64-NEXT: vmovaps %ymm0, (%rsi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll?rev=337134&r1=337133&r2=337134&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll Sun Jul 15 11:51:07 2018
@@ -73,7 +73,8 @@ define i64 @extract_any_extend_vector_in
; X32-AVX-NEXT: movl 40(%ebp), %ecx
; X32-AVX-NEXT: vbroadcastsd 32(%ebp), %ymm0
; X32-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; X32-AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; X32-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vmovaps %ymm1, {{[0-9]+}}(%esp)
; X32-AVX-NEXT: vmovaps %ymm1, {{[0-9]+}}(%esp)
; X32-AVX-NEXT: vmovaps %ymm1, {{[0-9]+}}(%esp)
@@ -102,7 +103,8 @@ define i64 @extract_any_extend_vector_in
; X64-AVX-NEXT: # kill: def $edi killed $edi def $rdi
; X64-AVX-NEXT: vpermpd {{.*#+}} ymm0 = ymm3[3,1,2,3]
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64-AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; X64-AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; X64-AVX-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; X64-AVX-NEXT: vmovaps %ymm1, (%rsp)
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=337134&r1=337133&r2=337134&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Sun Jul 15 11:51:07 2018
@@ -1376,7 +1376,7 @@ define <4 x double> @insert_reg_and_zero
; ALL: # %bb.0:
; ALL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; ALL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; ALL-NEXT: retq
%v = insertelement <4 x double> undef, double %a, i32 0
%shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll?rev=337134&r1=337133&r2=337134&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll Sun Jul 15 11:51:07 2018
@@ -523,13 +523,13 @@ define <4 x double> @combine_pshufb_as_v
; X32-LABEL: combine_pshufb_as_vzmovl_64:
; X32: # %bb.0:
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vzmovl_64:
; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; X64-NEXT: retq
%1 = bitcast <4 x double> %a0 to <32 x i8>
%2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
@@ -541,13 +541,13 @@ define <8 x float> @combine_pshufb_as_vz
; X32-LABEL: combine_pshufb_as_vzmovl_32:
; X32: # %bb.0:
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vzmovl_32:
; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: retq
%1 = bitcast <8 x float> %a0 to <32 x i8>
%2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)