[llvm] r309546 - [X86][AVX512] Add masked MOVS[S|D] patterns

Guy Blank via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 31 01:26:14 PDT 2017


Author: guyblank
Date: Mon Jul 31 01:26:14 2017
New Revision: 309546

URL: http://llvm.org/viewvc/llvm-project?rev=309546&view=rev
Log:
[X86][AVX512] Add masked MOVS[S|D] patterns

Added patterns to recognize that an AND with 1 on the mask of a scalar
masked move is not needed, since only the lower bit of the mask is
relevant for the instruction.

Differential Revision:
https://reviews.llvm.org/D35897

Modified:
    llvm/trunk/lib/Target/X86/X86InstrAVX512.td
    llvm/trunk/test/CodeGen/X86/avx512-load-store.ll
    llvm/trunk/test/CodeGen/X86/avx512-select.ll

Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=309546&r1=309545&r2=309546&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Mon Jul 31 01:26:14 2017
@@ -4002,10 +4002,26 @@ defm : avx512_load_scalar_lowering_subre
 defm : avx512_load_scalar_lowering_subreg<"VMOVSDZ", avx512vl_f64_info,
                    (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))), GR8, sub_8bit>;
 
+def : Pat<(f32 (X86selects (scalar_to_vector (and GR8:$mask, (i8 1))),
+                           (f32 FR32X:$src1), (f32 FR32X:$src2))),
+          (COPY_TO_REGCLASS
+            (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
+                        (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF),
+                          GR8:$mask, sub_8bit)), VK1WM),
+            (v4f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;
+
 def : Pat<(f32 (X86selects VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
           (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
            VK1WM:$mask, (v4f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;
 
+def : Pat<(f64 (X86selects (scalar_to_vector (and GR8:$mask, (i8 1))),
+                           (f64 FR64X:$src1), (f64 FR64X:$src2))),
+          (COPY_TO_REGCLASS
+            (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
+                        (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF),
+                          GR8:$mask, sub_8bit)), VK1WM),
+            (v2f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
+
 def : Pat<(f64 (X86selects VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
           (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
            VK1WM:$mask, (v2f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;

Modified: llvm/trunk/test/CodeGen/X86/avx512-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-load-store.ll?rev=309546&r1=309545&r2=309546&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-load-store.ll Mon Jul 31 01:26:14 2017
@@ -12,7 +12,6 @@ define <4 x float> @test_mm_mask_move_ss
 ; CHECK32-LABEL: test_mm_mask_move_ss:
 ; CHECK32:       # BB#0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT:    andb $1, %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
 ; CHECK32-NEXT:    vmovss %xmm2, %xmm0, %xmm0 {%k1}
 ; CHECK32-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
@@ -37,7 +36,6 @@ define <4 x float> @test_mm_maskz_move_s
 ; CHECK32-LABEL: test_mm_maskz_move_ss:
 ; CHECK32:       # BB#0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT:    andb $1, %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
 ; CHECK32-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; CHECK32-NEXT:    vmovss %xmm1, %xmm0, %xmm2 {%k1}
@@ -62,7 +60,6 @@ define <2 x double> @test_mm_mask_move_s
 ; CHECK32-LABEL: test_mm_mask_move_sd:
 ; CHECK32:       # BB#0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT:    andb $1, %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
 ; CHECK32-NEXT:    vmovsd %xmm2, %xmm0, %xmm0 {%k1}
 ; CHECK32-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
@@ -87,7 +84,6 @@ define <2 x double> @test_mm_maskz_move_
 ; CHECK32-LABEL: test_mm_maskz_move_sd:
 ; CHECK32:       # BB#0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT:    andb $1, %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
 ; CHECK32-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; CHECK32-NEXT:    vmovsd %xmm1, %xmm0, %xmm2 {%k1}

Modified: llvm/trunk/test/CodeGen/X86/avx512-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-select.ll?rev=309546&r1=309545&r2=309546&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-select.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-select.ll Mon Jul 31 01:26:14 2017
@@ -289,7 +289,6 @@ define double @pr30561_f64(double %b, do
 ;
 ; X64-LABEL: pr30561_f64:
 ; X64:       # BB#0:
-; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1}
 ; X64-NEXT:    retq
@@ -309,7 +308,6 @@ define float @pr30561_f32(float %b, floa
 ;
 ; X64-LABEL: pr30561_f32:
 ; X64:       # BB#0:
-; X64-NEXT:    andb $1, %dil
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vmovss %xmm1, %xmm0, %xmm0 {%k1}
 ; X64-NEXT:    retq




More information about the llvm-commits mailing list