[llvm] r325999 - [X86] Remove checks for '(scalar_to_vector (i8 (trunc GR32:)))' from scalar masked move patterns.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 23 16:15:05 PST 2018


Author: ctopper
Date: Fri Feb 23 16:15:05 2018
New Revision: 325999

URL: http://llvm.org/viewvc/llvm-project?rev=325999&view=rev
Log:
[X86] Remove checks for '(scalar_to_vector (i8 (trunc GR32:)))' from scalar masked move patterns.

This portion can be matched by other patterns on its own, so we don't need it to make the larger pattern valid. It's sufficient to have a v1i1 mask input, without caring where it came from.
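
As a quick illustration (a hand-written sketch, not part of the commit; the function and value names are made up), the kind of IR that produces this select shape -- compare the test functions below -- looks roughly like:

define <4 x float> @mask_move_sketch(<4 x float> %w, i8 %u,
                                     <4 x float> %a, <4 x float> %b) {
entry:
  ; Keep only bit 0 of the mask byte and convert it to an i1. On 64-bit
  ; targets the byte arrives in a GR32 and gets truncated; on 32-bit
  ; targets it is loaded from the stack instead, which is presumably why
  ; hard-coding the trunc shape made the patterns miss the CHECK32 cases
  ; in the test diff below.
  %bit = and i8 %u, 1
  %cond = icmp ne i8 %bit, 0
  ; Select between the low elements under the mask, then reinsert the
  ; result. During isel this becomes roughly the
  ; (scalar_to_vector (X86selects ...)) shape the patterns above match,
  ; now taking the v1i1 mask as-is.
  %b0 = extractelement <4 x float> %b, i64 0
  %w0 = extractelement <4 x float> %w, i64 0
  %low = select i1 %cond, float %b0, float %w0
  %res = insertelement <4 x float> %a, float %low, i64 0
  ret <4 x float> %res
}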

Modified:
    llvm/trunk/lib/Target/X86/X86InstrAVX512.td
    llvm/trunk/test/CodeGen/X86/avx512-load-store.ll

Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=325999&r1=325998&r2=325999&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Fri Feb 23 16:15:05 2018
@@ -3878,22 +3878,22 @@ multiclass avx512_move_scalar_lowering<s
 
 def : Pat<(_.VT (OpNode _.RC:$src0,
                         (_.VT (scalar_to_vector
-                                  (_.EltVT (X86selects (scalar_to_vector (i8 (trunc GR32:$mask))),
+                                  (_.EltVT (X86selects VK1WM:$mask,
                                                        (_.EltVT _.FRC:$src1),
                                                        (_.EltVT _.FRC:$src2))))))),
           (!cast<Instruction>(InstrStr#rrk)
                         (COPY_TO_REGCLASS _.FRC:$src2, _.RC),
-                        (COPY_TO_REGCLASS GR32:$mask, VK1WM),
+                        VK1WM:$mask,
                         (_.VT _.RC:$src0),
                         (COPY_TO_REGCLASS _.FRC:$src1, _.RC))>;
 
 def : Pat<(_.VT (OpNode _.RC:$src0,
                         (_.VT (scalar_to_vector
-                                  (_.EltVT (X86selects (scalar_to_vector (i8 (trunc GR32:$mask))),
+                                  (_.EltVT (X86selects VK1WM:$mask,
                                                        (_.EltVT _.FRC:$src1),
                                                        (_.EltVT ZeroFP))))))),
           (!cast<Instruction>(InstrStr#rrkz)
-                        (COPY_TO_REGCLASS GR32:$mask, VK1WM),
+                        VK1WM:$mask,
                         (_.VT _.RC:$src0),
                         (COPY_TO_REGCLASS _.FRC:$src1, _.RC))>;
 }

Modified: llvm/trunk/test/CodeGen/X86/avx512-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-load-store.ll?rev=325999&r1=325998&r2=325999&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-load-store.ll Fri Feb 23 16:15:05 2018
@@ -13,8 +13,7 @@ define <4 x float> @test_mm_mask_move_ss
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
-; CHECK32-NEXT:    vmovss %xmm2, %xmm0, %xmm0 {%k1}
-; CHECK32-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; CHECK32-NEXT:    vmovss %xmm2, %xmm1, %xmm0 {%k1}
 ; CHECK32-NEXT:    retl
 entry:
   %0 = and i8 %__U, 1
@@ -37,9 +36,7 @@ define <4 x float> @test_mm_maskz_move_s
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
-; CHECK32-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; CHECK32-NEXT:    vmovss %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK32-NEXT:    vmovss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
+; CHECK32-NEXT:    vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK32-NEXT:    retl
 entry:
   %0 = and i8 %__U, 1
@@ -61,8 +58,7 @@ define <2 x double> @test_mm_mask_move_s
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
-; CHECK32-NEXT:    vmovsd %xmm2, %xmm0, %xmm0 {%k1}
-; CHECK32-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; CHECK32-NEXT:    vmovsd %xmm2, %xmm1, %xmm0 {%k1}
 ; CHECK32-NEXT:    retl
 entry:
   %0 = and i8 %__U, 1
@@ -85,9 +81,7 @@ define <2 x double> @test_mm_maskz_move_
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; CHECK32-NEXT:    kmovw %eax, %k1
-; CHECK32-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; CHECK32-NEXT:    vmovsd %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK32-NEXT:    vmovsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; CHECK32-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
 ; CHECK32-NEXT:    retl
 entry:
   %0 = and i8 %__U, 1



