[llvm] r332049 - [X86] Add new patterns for masked scalar load/store to match clang's codegen from r331958.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu May 10 14:49:16 PDT 2018


Author: ctopper
Date: Thu May 10 14:49:16 2018
New Revision: 332049

URL: http://llvm.org/viewvc/llvm-project?rev=332049&view=rev
Log:
[X86] Add new patterns for masked scalar load/store to match clang's codegen from r331958.

Clang's codegen now uses 128-bit masked load/store intrinsics in IR. The backend widens these to 512 bits on targets that have AVX512F but not AVX512VL.
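
For reference, the masked store IR now emitted by clang looks like this (taken
from the new test_mm_mask_store_ss_2 test below); the scalar mask is reduced to
its low bit, bitcast to <8 x i1>, and shuffled down to <4 x i1>:

  %0 = bitcast float* %__P to <4 x float>*
  %1 = and i8 %__U, 1
  %2 = bitcast i8 %1 to <8 x i1>
  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %__A, <4 x float>* %0, i32 1, <4 x i1> %extract.i)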

This patch adds patterns that match codegen's widened form, plus AVX512VL patterns for the 128-bit operations that don't get widened.
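
The load side is covered the same way; for example, the zero-masking form from
the new test_mm_maskz_load_ss_2 test below uses a zeroinitializer passthru and
now selects a zero-masked VMOVSS on both the AVX512F and AVX512VL RUN lines:

  %0 = bitcast float* %__W to <4 x float>*
  %1 = and i8 %__U, 1
  %2 = bitcast i8 %1 to <8 x i1>
  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 1, <4 x i1> %extract.i, <4 x float> zeroinitializer)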

We may be able to drop some of the old patterns, but I leave that for a future patch.

Modified:
    llvm/trunk/lib/Target/X86/X86InstrAVX512.td
    llvm/trunk/test/CodeGen/X86/avx512-load-store.ll

Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=332049&r1=332048&r2=332049&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Thu May 10 14:49:16 2018
@@ -3876,6 +3876,31 @@ def : Pat<(masked_store addr:$dst, Mask,
 
 }
 
+// This matches the more recent codegen from clang that avoids emitting a
+// 512-bit masked store directly. Codegen will widen a 128-bit masked store
+// to 512 bits on AVX512F-only targets.
+multiclass avx512_store_scalar_lowering_subreg2<string InstrStr,
+                                               AVX512VLVectorVTInfo _,
+                                               dag Mask512, dag Mask128,
+                                               RegisterClass MaskRC,
+                                               SubRegIndex subreg> {
+
+// AVX512F pattern.
+def : Pat<(masked_store addr:$dst, Mask512,
+             (_.info512.VT (insert_subvector undef,
+                               (_.info128.VT _.info128.RC:$src),
+                               (iPTR 0)))),
+          (!cast<Instruction>(InstrStr#mrk) addr:$dst,
+                      (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
+                      (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
+
+// AVX512VL pattern.
+def : Pat<(masked_store addr:$dst, Mask128, (_.info128.VT _.info128.RC:$src)),
+          (!cast<Instruction>(InstrStr#mrk) addr:$dst,
+                      (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
+                      (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
+}
+
 multiclass avx512_load_scalar_lowering<string InstrStr, AVX512VLVectorVTInfo _,
                                        dag Mask, RegisterClass MaskRC> {
 
@@ -3926,6 +3951,48 @@ def : Pat<(_.info128.VT (extract_subvect
 
 }
 
+// This matches the more recent codegen from clang that avoids emitting a
+// 512-bit masked load directly. Codegen will widen a 128-bit masked load
+// to 512 bits on AVX512F-only targets.
+multiclass avx512_load_scalar_lowering_subreg2<string InstrStr,
+                                              AVX512VLVectorVTInfo _,
+                                              dag Mask512, dag Mask128,
+                                              RegisterClass MaskRC,
+                                              SubRegIndex subreg> {
+// AVX512F patterns.
+def : Pat<(_.info128.VT (extract_subvector
+                         (_.info512.VT (masked_load addr:$srcAddr, Mask512,
+                                        (_.info512.VT (bitconvert
+                                                       (v16i32 immAllZerosV))))),
+                           (iPTR 0))),
+          (!cast<Instruction>(InstrStr#rmkz)
+                      (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
+                      addr:$srcAddr)>;
+
+def : Pat<(_.info128.VT (extract_subvector
+                (_.info512.VT (masked_load addr:$srcAddr, Mask512,
+                      (_.info512.VT (insert_subvector undef,
+                            (_.info128.VT (X86vzmovl _.info128.RC:$src)),
+                            (iPTR 0))))),
+                (iPTR 0))),
+          (!cast<Instruction>(InstrStr#rmk) _.info128.RC:$src,
+                      (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
+                      addr:$srcAddr)>;
+
+// AVX512VL patterns.
+def : Pat<(_.info128.VT (masked_load addr:$srcAddr, Mask128,
+                         (_.info128.VT (bitconvert (v4i32 immAllZerosV))))),
+          (!cast<Instruction>(InstrStr#rmkz)
+                      (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
+                      addr:$srcAddr)>;
+
+def : Pat<(_.info128.VT (masked_load addr:$srcAddr, Mask128,
+                         (_.info128.VT (X86vzmovl _.info128.RC:$src)))),
+          (!cast<Instruction>(InstrStr#rmk) _.info128.RC:$src,
+                      (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
+                      addr:$srcAddr)>;
+}
+
 defm : avx512_move_scalar_lowering<"VMOVSSZ", X86Movss, fp32imm0, v4f32x_info>;
 defm : avx512_move_scalar_lowering<"VMOVSDZ", X86Movsd, fp64imm0, v2f64x_info>;
 
@@ -3936,6 +4003,31 @@ defm : avx512_store_scalar_lowering_subr
 defm : avx512_store_scalar_lowering_subreg<"VMOVSDZ", avx512vl_f64_info,
                    (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))), GR8, sub_8bit>;
 
+defm : avx512_store_scalar_lowering_subreg2<"VMOVSSZ", avx512vl_f32_info,
+                   (v16i1 (insert_subvector
+                           (v16i1 immAllZerosV),
+                           (v4i1 (extract_subvector
+                                  (v8i1 (bitconvert (and GR8:$mask, (i8 1)))),
+                                  (iPTR 0))),
+                           (iPTR 0))),
+                   (v4i1 (extract_subvector
+                          (v8i1 (bitconvert (and GR8:$mask, (i8 1)))),
+                          (iPTR 0))), GR8, sub_8bit>;
+defm : avx512_store_scalar_lowering_subreg2<"VMOVSDZ", avx512vl_f64_info,
+                   (v8i1
+                    (extract_subvector
+                     (v16i1
+                      (insert_subvector
+                       (v16i1 immAllZerosV),
+                       (v2i1 (extract_subvector
+                              (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))),
+                              (iPTR 0))),
+                       (iPTR 0))),
+                     (iPTR 0))),
+                   (v2i1 (extract_subvector
+                          (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))),
+                          (iPTR 0))), GR8, sub_8bit>;
+
 defm : avx512_load_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
                    (v16i1 (bitconvert (i16 (trunc (and GR32:$mask, (i32 1)))))), GR32>;
 defm : avx512_load_scalar_lowering_subreg<"VMOVSSZ", avx512vl_f32_info,
@@ -3943,6 +4035,31 @@ defm : avx512_load_scalar_lowering_subre
 defm : avx512_load_scalar_lowering_subreg<"VMOVSDZ", avx512vl_f64_info,
                    (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))), GR8, sub_8bit>;
 
+defm : avx512_load_scalar_lowering_subreg2<"VMOVSSZ", avx512vl_f32_info,
+                   (v16i1 (insert_subvector
+                           (v16i1 immAllZerosV),
+                           (v4i1 (extract_subvector
+                                  (v8i1 (bitconvert (and GR8:$mask, (i8 1)))),
+                                  (iPTR 0))),
+                           (iPTR 0))),
+                   (v4i1 (extract_subvector
+                          (v8i1 (bitconvert (and GR8:$mask, (i8 1)))),
+                          (iPTR 0))), GR8, sub_8bit>;
+defm : avx512_load_scalar_lowering_subreg2<"VMOVSDZ", avx512vl_f64_info,
+                   (v8i1
+                    (extract_subvector
+                     (v16i1
+                      (insert_subvector
+                       (v16i1 immAllZerosV),
+                       (v2i1 (extract_subvector
+                              (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))),
+                              (iPTR 0))),
+                       (iPTR 0))),
+                     (iPTR 0))),
+                   (v2i1 (extract_subvector
+                          (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))),
+                          (iPTR 0))), GR8, sub_8bit>;
+
 def : Pat<(f32 (X86selects (scalar_to_vector GR8:$mask),
                            (f32 FR32X:$src1), (f32 FR32X:$src2))),
           (COPY_TO_REGCLASS

Modified: llvm/trunk/test/CodeGen/X86/avx512-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-load-store.ll?rev=332049&r1=332048&r2=332049&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-load-store.ll Thu May 10 14:49:16 2018
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -O2 -mattr=avx512f -mtriple=x86_64-unknown | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK64
 ; RUN: llc < %s -O2 -mattr=avx512f -mtriple=i386-unknown | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK32
+; RUN: llc < %s -O2 -mattr=avx512vl -mtriple=x86_64-unknown | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK64
+; RUN: llc < %s -O2 -mattr=avx512vl -mtriple=i386-unknown | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK32
 
 define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) local_unnamed_addr #0 {
 ; CHECK64-LABEL: test_mm_mask_move_ss:
@@ -237,6 +239,149 @@ entry:
   ret <2 x double> %shuffle.i
 }
 
+; The tests below match clang's newer codegen that uses 128-bit masked load/stores.
+
+define void @test_mm_mask_store_ss_2(float* %__P, i8 zeroext %__U, <4 x float> %__A) {
+; CHECK64-LABEL: test_mm_mask_store_ss_2:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    kmovw %esi, %k1
+; CHECK64-NEXT:    vmovss %xmm0, (%rdi) {%k1}
+; CHECK64-NEXT:    retq
+;
+; CHECK32-LABEL: test_mm_mask_store_ss_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; CHECK32-NEXT:    kmovw %ecx, %k1
+; CHECK32-NEXT:    vmovss %xmm0, (%eax) {%k1}
+; CHECK32-NEXT:    retl
+entry:
+  %0 = bitcast float* %__P to <4 x float>*
+  %1 = and i8 %__U, 1
+  %2 = bitcast i8 %1 to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %__A, <4 x float>* %0, i32 1, <4 x i1> %extract.i)
+  ret void
+}
+
+define void @test_mm_mask_store_sd_2(double* %__P, i8 zeroext %__U, <2 x double> %__A) {
+; CHECK64-LABEL: test_mm_mask_store_sd_2:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    kmovw %esi, %k1
+; CHECK64-NEXT:    vmovsd %xmm0, (%rdi) {%k1}
+; CHECK64-NEXT:    retq
+;
+; CHECK32-LABEL: test_mm_mask_store_sd_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; CHECK32-NEXT:    kmovw %ecx, %k1
+; CHECK32-NEXT:    vmovsd %xmm0, (%eax) {%k1}
+; CHECK32-NEXT:    retl
+entry:
+  %0 = bitcast double* %__P to <2 x double>*
+  %1 = and i8 %__U, 1
+  %2 = bitcast i8 %1 to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  tail call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %__A, <2 x double>* %0, i32 1, <2 x i1> %extract.i)
+  ret void
+}
+
+define <4 x float> @test_mm_mask_load_ss_2(<4 x float> %__A, i8 zeroext %__U, float* readonly %__W) {
+; CHECK64-LABEL: test_mm_mask_load_ss_2:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    kmovw %edi, %k1
+; CHECK64-NEXT:    vmovss (%rsi), %xmm0 {%k1}
+; CHECK64-NEXT:    retq
+;
+; CHECK32-LABEL: test_mm_mask_load_ss_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; CHECK32-NEXT:    kmovw %ecx, %k1
+; CHECK32-NEXT:    vmovss (%eax), %xmm0 {%k1}
+; CHECK32-NEXT:    retl
+entry:
+  %shuffle.i = shufflevector <4 x float> %__A, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
+  %0 = bitcast float* %__W to <4 x float>*
+  %1 = and i8 %__U, 1
+  %2 = bitcast i8 %1 to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 1, <4 x i1> %extract.i, <4 x float> %shuffle.i)
+  ret <4 x float> %3
+}
+
+define <4 x float> @test_mm_maskz_load_ss_2(i8 zeroext %__U, float* readonly %__W) {
+; CHECK64-LABEL: test_mm_maskz_load_ss_2:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    kmovw %edi, %k1
+; CHECK64-NEXT:    vmovss (%rsi), %xmm0 {%k1} {z}
+; CHECK64-NEXT:    retq
+;
+; CHECK32-LABEL: test_mm_maskz_load_ss_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; CHECK32-NEXT:    kmovw %ecx, %k1
+; CHECK32-NEXT:    vmovss (%eax), %xmm0 {%k1} {z}
+; CHECK32-NEXT:    retl
+entry:
+  %0 = bitcast float* %__W to <4 x float>*
+  %1 = and i8 %__U, 1
+  %2 = bitcast i8 %1 to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %3 = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 1, <4 x i1> %extract.i, <4 x float> zeroinitializer)
+  ret <4 x float> %3
+}
+
+define <2 x double> @test_mm_mask_load_sd_2(<2 x double> %__A, i8 zeroext %__U, double* readonly %__W) {
+; CHECK64-LABEL: test_mm_mask_load_sd_2:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    kmovw %edi, %k1
+; CHECK64-NEXT:    vmovsd (%rsi), %xmm0 {%k1}
+; CHECK64-NEXT:    retq
+;
+; CHECK32-LABEL: test_mm_mask_load_sd_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; CHECK32-NEXT:    kmovw %ecx, %k1
+; CHECK32-NEXT:    vmovsd (%eax), %xmm0 {%k1}
+; CHECK32-NEXT:    retl
+entry:
+  %shuffle3.i = insertelement <2 x double> %__A, double 0.000000e+00, i32 1
+  %0 = bitcast double* %__W to <2 x double>*
+  %1 = and i8 %__U, 1
+  %2 = bitcast i8 %1 to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %3 = tail call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %0, i32 1, <2 x i1> %extract.i, <2 x double> %shuffle3.i)
+  ret <2 x double> %3
+}
+
+define <2 x double> @test_mm_maskz_load_sd_2(i8 zeroext %__U, double* readonly %__W) {
+; CHECK64-LABEL: test_mm_maskz_load_sd_2:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    kmovw %edi, %k1
+; CHECK64-NEXT:    vmovsd (%rsi), %xmm0 {%k1} {z}
+; CHECK64-NEXT:    retq
+;
+; CHECK32-LABEL: test_mm_maskz_load_sd_2:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; CHECK32-NEXT:    kmovw %ecx, %k1
+; CHECK32-NEXT:    vmovsd (%eax), %xmm0 {%k1} {z}
+; CHECK32-NEXT:    retl
+entry:
+  %0 = bitcast double* %__W to <2 x double>*
+  %1 = and i8 %__U, 1
+  %2 = bitcast i8 %1 to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %2, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %3 = tail call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %0, i32 1, <2 x i1> %extract.i, <2 x double> zeroinitializer)
+  ret <2 x double> %3
+}
+
+
 declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>) #3
 
 declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>) #3
@@ -244,3 +389,11 @@ declare void @llvm.masked.store.v8f64.p0
 declare <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>) #4
 
 declare <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>) #4
+
+declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
+
+declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)
+
+declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
+
+declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)



