[llvm] r283020 - [X86] Cleanup patterns for using VMOVDDUP for broadcasts.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sat Oct 1 00:11:24 PDT 2016
Author: ctopper
Date: Sat Oct 1 02:11:24 2016
New Revision: 283020
URL: http://llvm.org/viewvc/llvm-project?rev=283020&view=rev
Log:
[X86] Cleanup patterns for using VMOVDDUP for broadcasts.
- Remove OptForSize. Not all of the backend follows the same rules for creating broadcasts, and there is no conflicting pattern.
- Don't stop selecting the VEX-encoded VMOVDDUP when AVX512 is supported; the EVEX-encoded VMOVDDUP requires VLX.
- Only use VMOVDDUP for v2i64 broadcasts if AVX2 is not supported.
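For context, here is a minimal IR sketch (the function and parameter names are illustrative, not taken from the tree) of the kind of 64-bit broadcast-from-memory these patterns match. With the OptForSize requirement gone, a plain AVX target can select vmovddup for it regardless of the function's size attributes:

; Hypothetical example: splat a double loaded from memory into both lanes.
define <2 x double> @broadcast_f64(double* %p) {
  %x = load double, double* %p
  %ins = insertelement <2 x double> undef, double %x, i32 0
  %splat = shufflevector <2 x double> %ins, <2 x double> undef, <2 x i32> zeroinitializer
  ret <2 x double> %splat
}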
Modified:
llvm/trunk/lib/Target/X86/X86InstrSSE.td
llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
llvm/trunk/test/CodeGen/X86/splat-for-size.ll
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=283020&r1=283019&r2=283020&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Sat Oct 1 02:11:24 2016
@@ -5162,12 +5162,12 @@ let Predicates = [HasAVX] in {
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
}
-let Predicates = [UseAVX, OptForSize] in {
- def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
- (VMOVDDUPrm addr:$src)>;
- def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
- (VMOVDDUPrm addr:$src)>;
-}
+let Predicates = [HasAVX, NoVLX] in
+def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
+ (VMOVDDUPrm addr:$src)>;
+let Predicates = [HasAVX1Only] in
+def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
+ (VMOVDDUPrm addr:$src)>;
let Predicates = [UseSSE3] in {
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
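A companion sketch for the integer case (again an illustrative function, not one from the test suite): with the v2i64 pattern now gated on HasAVX1Only, an AVX1-only target still folds this into vmovddup, while an AVX2 target is left to use vpbroadcastq, matching the splat-for-size.ll update below.

; Hypothetical example: splat an i64 loaded from memory into both lanes.
define <2 x i64> @broadcast_i64(i64* %p) {
  %x = load i64, i64* %p
  %ins = insertelement <2 x i64> undef, i64 %x, i32 0
  %splat = shufflevector <2 x i64> %ins, <2 x i64> undef, <2 x i32> zeroinitializer
  ret <2 x i64> %splat
}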
Modified: llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll?rev=283020&r1=283019&r2=283020&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll Sat Oct 1 02:11:24 2016
@@ -250,8 +250,7 @@ define <8 x i16> @broadcast_mem_v4i16_v8
; X32-AVX2-LABEL: broadcast_mem_v4i16_v8i16:
; X32-AVX2: ## BB#0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X32-AVX2-NEXT: retl
;
; X64-AVX2-LABEL: broadcast_mem_v4i16_v8i16:
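Roughly, the test above splats a 64-bit chunk of <4 x i16> loaded from memory; the IR below is an approximate reconstruction for illustration, not copied from the file. Because the whole 64-bit element is duplicated, the load can now fold straight into vmovddup instead of going through a separate vmovsd first, which is what the updated CHECK lines verify.

; Approximate shape of the test (reconstructed, not verbatim).
define <8 x i16> @broadcast_mem_v4i16_v8i16(<4 x i16>* %p) {
  %v = load <4 x i16>, <4 x i16>* %p
  %splat = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
  ret <8 x i16> %splat
}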
Modified: llvm/trunk/test/CodeGen/X86/splat-for-size.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/splat-for-size.ll?rev=283020&r1=283019&r2=283020&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/splat-for-size.ll (original)
+++ llvm/trunk/test/CodeGen/X86/splat-for-size.ll Sat Oct 1 02:11:24 2016
@@ -49,11 +49,17 @@ define <8 x float> @splat_v8f32(<8 x flo
; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
define <2 x i64> @splat_v2i64(<2 x i64> %x) #1 {
-; CHECK-LABEL: splat_v2i64:
-; CHECK: # BB#0:
-; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
-; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
+; AVX-LABEL: splat_v2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+;
+; AVX2-LABEL: splat_v2i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
%add = add <2 x i64> %x, <i64 1, i64 1>
ret <2 x i64> %add
}