[llvm] r336828 - [X86] Remove patterns for inserting a load into a zero vector.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 11 11:09:04 PDT 2018


Author: ctopper
Date: Wed Jul 11 11:09:04 2018
New Revision: 336828

URL: http://llvm.org/viewvc/llvm-project?rev=336828&view=rev
Log:
[X86] Remove patterns for inserting a load into a zero vector.

We can instead block the load folding in isProfitableToFold. Then isel will emit a register->register move for the zeroing part and a separate load. The PostProcessISelDAG should be able to remove the register->register move.

This saves us patterns and fixes the fact that we only had unaligned load patterns. The test changes show places where we should have been using an aligned load.

Modified:
    llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
    llvm/trunk/lib/Target/X86/X86InstrVecCompiler.td
    llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=336828&r1=336827&r2=336828&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Wed Jul 11 11:09:04 2018
@@ -614,9 +614,12 @@ X86DAGToDAGISel::IsProfitableToFold(SDVa
     }
   }
 
-  // Prevent folding a load if this can implemented with an insert_subreg.
+  // Prevent folding a load if this can implemented with an insert_subreg or
+  // a move that implicitly zeroes.
   if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
-      Root->getOperand(0).isUndef() && isNullConstant(Root->getOperand(2)))
+      isNullConstant(Root->getOperand(2)) &&
+      (Root->getOperand(0).isUndef() ||
+       ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
     return false;
 
   return true;

Modified: llvm/trunk/lib/Target/X86/X86InstrVecCompiler.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrVecCompiler.td?rev=336828&r1=336827&r2=336828&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrVecCompiler.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrVecCompiler.td Wed Jul 11 11:09:04 2018
@@ -256,107 +256,64 @@ let Predicates = [HasVLX] in {
 }
 
 // If we're inserting into an all zeros vector, just use a plain move which
-// will zero the upper bits.
-// TODO: Is there a safe way to detect whether the producing instruction
-// already zeroed the upper bits?
-multiclass subvector_zero_lowering<string MoveStr, string LoadStr,
-                                   RegisterClass RC, ValueType DstTy,
-                                   ValueType SrcTy, ValueType ZeroTy,
-                                   PatFrag memop, SubRegIndex SubIdx> {
+// will zero the upper bits. A post-isel hook will take care of removing
+// any moves that we can prove are unnecessary.
+multiclass subvec_zero_lowering<string MoveStr,
+                                RegisterClass RC, ValueType DstTy,
+                                ValueType SrcTy, ValueType ZeroTy,
+                                SubRegIndex SubIdx> {
   def : Pat<(DstTy (insert_subvector (bitconvert (ZeroTy immAllZerosV)),
                                      (SrcTy RC:$src), (iPTR 0))),
             (SUBREG_TO_REG (i64 0),
              (!cast<Instruction>("VMOV"#MoveStr#"rr") RC:$src), SubIdx)>;
-
-  def : Pat<(DstTy (insert_subvector (bitconvert (ZeroTy immAllZerosV)),
-                                     (SrcTy (bitconvert (memop addr:$src))),
-                                     (iPTR 0))),
-            (SUBREG_TO_REG (i64 0),
-             (!cast<Instruction>("VMOV"#LoadStr#"rm") addr:$src), SubIdx)>;
 }
 
 let Predicates = [HasAVX, NoVLX] in {
-  defm : subvector_zero_lowering<"APD", "UPD", VR128, v4f64, v2f64, v8i32,
-                                 loadv2f64, sub_xmm>;
-  defm : subvector_zero_lowering<"APS", "UPS", VR128, v8f32, v4f32, v8i32,
-                                 loadv4f32, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA", "DQU", VR128, v4i64, v2i64, v8i32,
-                                 loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA", "DQU", VR128, v8i32, v4i32, v8i32,
-                                 loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA", "DQU", VR128, v16i16, v8i16, v8i32,
-                                 loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA", "DQU", VR128, v32i8, v16i8, v8i32,
-                                 loadv2i64, sub_xmm>;
+  defm : subvec_zero_lowering<"APD", VR128, v4f64, v2f64, v8i32, sub_xmm>;
+  defm : subvec_zero_lowering<"APS", VR128, v8f32, v4f32, v8i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA", VR128, v4i64, v2i64, v8i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA", VR128, v8i32, v4i32, v8i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA", VR128, v16i16, v8i16, v8i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA", VR128, v32i8, v16i8, v8i32, sub_xmm>;
 }
 
 let Predicates = [HasVLX] in {
-  defm : subvector_zero_lowering<"APDZ128", "UPDZ128", VR128X, v4f64,
-                                 v2f64, v8i32, loadv2f64, sub_xmm>;
-  defm : subvector_zero_lowering<"APSZ128", "UPSZ128", VR128X, v8f32,
-                                 v4f32, v8i32, loadv4f32, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA64Z128", "DQU64Z128", VR128X, v4i64,
-                                 v2i64, v8i32, loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA64Z128", "DQU64Z128", VR128X, v8i32,
-                                 v4i32, v8i32, loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA64Z128", "DQU64Z128", VR128X, v16i16,
-                                 v8i16, v8i32, loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA64Z128", "DQU64Z128", VR128X, v32i8,
-                                 v16i8, v8i32, loadv2i64, sub_xmm>;
-
-  defm : subvector_zero_lowering<"APDZ128", "UPDZ128", VR128X, v8f64,
-                                 v2f64, v16i32, loadv2f64, sub_xmm>;
-  defm : subvector_zero_lowering<"APSZ128", "UPSZ128", VR128X, v16f32,
-                                 v4f32, v16i32, loadv4f32, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA64Z128", "DQU64Z128", VR128X, v8i64,
-                                 v2i64, v16i32, loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA64Z128", "DQU64Z128", VR128X, v16i32,
-                                 v4i32, v16i32, loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA64Z128", "DQU64Z128", VR128X, v32i16,
-                                 v8i16, v16i32, loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA64Z128", "DQU64Z128", VR128X, v64i8,
-                                 v16i8, v16i32, loadv2i64, sub_xmm>;
-
-  defm : subvector_zero_lowering<"APDZ256", "UPDZ256", VR256X, v8f64,
-                                 v4f64, v16i32, loadv4f64, sub_ymm>;
-  defm : subvector_zero_lowering<"APSZ256", "UPDZ256", VR256X, v16f32,
-                                 v8f32, v16i32, loadv8f32, sub_ymm>;
-  defm : subvector_zero_lowering<"DQA64Z256", "DQU64Z256", VR256X, v8i64,
-                                 v4i64, v16i32, loadv4i64, sub_ymm>;
-  defm : subvector_zero_lowering<"DQA64Z256", "DQU64Z256", VR256X, v16i32,
-                                 v8i32, v16i32, loadv4i64, sub_ymm>;
-  defm : subvector_zero_lowering<"DQA64Z256", "DQU64Z256", VR256X, v32i16,
-                                 v16i16, v16i32, loadv4i64, sub_ymm>;
-  defm : subvector_zero_lowering<"DQA64Z256", "DQU64Z256", VR256X, v64i8,
-                                 v32i8, v16i32, loadv4i64, sub_ymm>;
+  defm : subvec_zero_lowering<"APDZ128", VR128X, v4f64, v2f64, v8i32, sub_xmm>;
+  defm : subvec_zero_lowering<"APSZ128", VR128X, v8f32, v4f32, v8i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA64Z128", VR128X, v4i64, v2i64, v8i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA64Z128", VR128X, v8i32, v4i32, v8i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA64Z128", VR128X, v16i16, v8i16, v8i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA64Z128", VR128X, v32i8, v16i8, v8i32, sub_xmm>;
+
+  defm : subvec_zero_lowering<"APDZ128", VR128X, v8f64, v2f64, v16i32, sub_xmm>;
+  defm : subvec_zero_lowering<"APSZ128", VR128X, v16f32, v4f32, v16i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA64Z128", VR128X, v8i64, v2i64, v16i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA64Z128", VR128X, v16i32, v4i32, v16i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA64Z128", VR128X, v32i16, v8i16, v16i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA64Z128", VR128X, v64i8, v16i8, v16i32, sub_xmm>;
+
+  defm : subvec_zero_lowering<"APDZ256", VR256X, v8f64, v4f64, v16i32, sub_ymm>;
+  defm : subvec_zero_lowering<"APSZ256", VR256X, v16f32, v8f32, v16i32, sub_ymm>;
+  defm : subvec_zero_lowering<"DQA64Z256", VR256X, v8i64, v4i64, v16i32, sub_ymm>;
+  defm : subvec_zero_lowering<"DQA64Z256", VR256X, v16i32, v8i32, v16i32, sub_ymm>;
+  defm : subvec_zero_lowering<"DQA64Z256", VR256X, v32i16, v16i16, v16i32, sub_ymm>;
+  defm : subvec_zero_lowering<"DQA64Z256", VR256X, v64i8, v32i8, v16i32, sub_ymm>;
 }
 
 let Predicates = [HasAVX512, NoVLX] in {
-  defm : subvector_zero_lowering<"APD", "UPD", VR128, v8f64, v2f64,
-                                 v16i32,loadv2f64, sub_xmm>;
-  defm : subvector_zero_lowering<"APS", "UPS", VR128, v16f32, v4f32,
-                                 v16i32, loadv4f32, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA", "DQU", VR128, v8i64, v2i64,
-                                 v16i32, loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA", "DQU", VR128, v16i32, v4i32,
-                                 v16i32, loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA", "DQU", VR128, v32i16, v8i16,
-                                 v16i32, loadv2i64, sub_xmm>;
-  defm : subvector_zero_lowering<"DQA", "DQU", VR128, v64i8, v16i8,
-                                 v16i32, loadv2i64, sub_xmm>;
-
-  defm : subvector_zero_lowering<"APDY", "UPDY", VR256, v8f64, v4f64,
-                                 v16i32, loadv4f64, sub_ymm>;
-  defm : subvector_zero_lowering<"APSY", "UPSY", VR256, v16f32, v8f32,
-                                 v16i32, loadv8f32, sub_ymm>;
-  defm : subvector_zero_lowering<"DQAY", "DQUY", VR256, v8i64, v4i64,
-                                 v16i32, loadv4i64, sub_ymm>;
-  defm : subvector_zero_lowering<"DQAY", "DQUY", VR256, v16i32, v8i32,
-                                 v16i32, loadv4i64, sub_ymm>;
-  defm : subvector_zero_lowering<"DQAY", "DQUY", VR256, v32i16, v16i16,
-                                 v16i32, loadv4i64, sub_ymm>;
-  defm : subvector_zero_lowering<"DQAY", "DQUY", VR256, v64i8, v32i8,
-                                 v16i32, loadv4i64, sub_ymm>;
+  defm : subvec_zero_lowering<"APD", VR128, v8f64, v2f64, v16i32, sub_xmm>;
+  defm : subvec_zero_lowering<"APS", VR128, v16f32, v4f32, v16i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA", VR128, v8i64, v2i64, v16i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA", VR128, v16i32, v4i32, v16i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA", VR128, v32i16, v8i16, v16i32, sub_xmm>;
+  defm : subvec_zero_lowering<"DQA", VR128, v64i8, v16i8, v16i32, sub_xmm>;
+
+  defm : subvec_zero_lowering<"APDY", VR256, v8f64, v4f64, v16i32, sub_ymm>;
+  defm : subvec_zero_lowering<"APSY", VR256, v16f32, v8f32, v16i32, sub_ymm>;
+  defm : subvec_zero_lowering<"DQAY", VR256, v8i64, v4i64, v16i32, sub_ymm>;
+  defm : subvec_zero_lowering<"DQAY", VR256, v16i32, v8i32, v16i32, sub_ymm>;
+  defm : subvec_zero_lowering<"DQAY", VR256, v32i16, v16i16, v16i32, sub_ymm>;
+  defm : subvec_zero_lowering<"DQAY", VR256, v64i8, v32i8, v16i32, sub_ymm>;
 }
 
 class maskzeroupper<ValueType vt, RegisterClass RC> :

Modified: llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll?rev=336828&r1=336827&r2=336828&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll Wed Jul 11 11:09:04 2018
@@ -28,13 +28,13 @@ define <4 x double> @merge_4f64_2f64_23(
 define <4 x double> @merge_4f64_2f64_2z(<2 x double>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_2f64_2z:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovups 32(%rdi), %xmm0
+; AVX-NEXT:    vmovaps 32(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_2f64_2z:
 ; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 32(%eax), %xmm0
+; X32-AVX-NEXT:    vmovaps 32(%eax), %xmm0
 ; X32-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
   %val0 = load <2 x double>, <2 x double>* %ptr0
@@ -155,13 +155,13 @@ define <4 x double> @merge_4f64_f64_34z6
 define <4 x i64> @merge_4i64_2i64_3z(<2 x i64>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4i64_2i64_3z:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovups 48(%rdi), %xmm0
+; AVX-NEXT:    vmovaps 48(%rdi), %xmm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4i64_2i64_3z:
 ; X32-AVX:       # %bb.0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 48(%eax), %xmm0
+; X32-AVX-NEXT:    vmovaps 48(%eax), %xmm0
 ; X32-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 3
   %val0 = load <2 x i64>, <2 x i64>* %ptr0




More information about the llvm-commits mailing list