[llvm] r336457 - [X86] Add more FMA3 memory folding patterns. Remove patterns that are no longer needed.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 6 11:47:55 PDT 2018


Author: ctopper
Date: Fri Jul  6 11:47:55 2018
New Revision: 336457

URL: http://llvm.org/viewvc/llvm-project?rev=336457&view=rev
Log:
[X86] Add more FMA3 memory folding patterns. Remove patterns that are no longer needed.

We've removed the legacy FMA3 intrinsics and are now using llvm.fma and extractelement/insertelement. So we don't need patterns for the nodes that could only be created by the old intrinsics. Those ISD opcodes still exist because we haven't dropped the AVX512 intrinsics yet, but those should go to EVEX instructions.

Modified:
    llvm/trunk/lib/Target/X86/X86InstrAVX512.td
    llvm/trunk/lib/Target/X86/X86InstrFMA.td

Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=336457&r1=336456&r2=336457&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Fri Jul  6 11:47:55 2018
@@ -6865,6 +6865,22 @@ multiclass avx512_scalar_fma_patterns<SD
                (COPY_TO_REGCLASS _.FRC:$src3, VR128X))>;
 
     def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
+                (Op _.FRC:$src2,
+                    (_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))),
+                    (_.ScalarLdFrag addr:$src3)))))),
+              (!cast<I>(Prefix#"213"#Suffix#"Zm_Int")
+               VR128X:$src1, (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
+               addr:$src3)>;
+
+    def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
+                (Op (_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))),
+                    (_.ScalarLdFrag addr:$src3), _.FRC:$src2))))),
+              (!cast<I>(Prefix#"132"#Suffix#"Zm_Int")
+               VR128X:$src1, (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
+               addr:$src3)>;
+
+    // TODO: Add memory patterns.
+    def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
                (X86selects VK1WM:$mask,
                 (Op _.FRC:$src2,
                     (_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))),

Modified: llvm/trunk/lib/Target/X86/X86InstrFMA.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrFMA.td?rev=336457&r1=336456&r2=336457&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrFMA.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrFMA.td Fri Jul  6 11:47:55 2018
@@ -317,41 +317,6 @@ multiclass fma3s<bits<8> opc132, bits<8>
                         FR64, f64mem, sched>,
               fma3s_int_forms<opc132, opc213, opc231, OpStr, "sd", "SD",
                               VR128, sdmem, sched>, VEX_W;
-
-  // These patterns use the 123 ordering, instead of 213, even though
-  // they match the intrinsic to the 213 version of the instruction.
-  // This is because src1 is tied to dest, and the scalar intrinsics
-  // require the pass-through values to come from the first source
-  // operand, not the second.
-  let Predicates = [HasFMA, NoAVX512] in {
-    def : Pat<(v4f32 (OpNodeIntrin VR128:$src1, VR128:$src2, VR128:$src3)),
-              (!cast<Instruction>(NAME#"213SSr_Int")
-               VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-    def : Pat<(v2f64 (OpNodeIntrin VR128:$src1, VR128:$src2, VR128:$src3)),
-              (!cast<Instruction>(NAME#"213SDr_Int")
-               VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-    def : Pat<(v4f32 (OpNodeIntrin VR128:$src1, VR128:$src2,
-                                   sse_load_f32:$src3)),
-              (!cast<Instruction>(NAME#"213SSm_Int")
-               VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;
-
-    def : Pat<(v2f64 (OpNodeIntrin VR128:$src1, VR128:$src2,
-                                   sse_load_f64:$src3)),
-              (!cast<Instruction>(NAME#"213SDm_Int")
-               VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
-
-    def : Pat<(v4f32 (OpNodeIntrin VR128:$src1, sse_load_f32:$src3,
-                                   VR128:$src2)),
-              (!cast<Instruction>(NAME#"132SSm_Int")
-               VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;
-
-    def : Pat<(v2f64 (OpNodeIntrin VR128:$src1, sse_load_f64:$src3,
-                                   VR128:$src2)),
-              (!cast<Instruction>(NAME#"132SDm_Int")
-               VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
-  }
 }
 
 defm VFMADD : fma3s<0x99, 0xA9, 0xB9, "vfmadd", X86Fmadds1, X86Fmadd,
@@ -366,7 +331,7 @@ defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "
 
 multiclass scalar_fma_patterns<SDNode Op, string Prefix, string Suffix,
                                SDNode Move, ValueType VT, ValueType EltVT,
-                               RegisterClass RC> {
+                               RegisterClass RC, PatFrag mem_frag> {
   let Predicates = [HasFMA, NoAVX512] in {
     def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                 (Op RC:$src2,
@@ -375,18 +340,33 @@ multiclass scalar_fma_patterns<SDNode Op
               (!cast<Instruction>(Prefix#"213"#Suffix#"r_Int")
                VR128:$src1, (COPY_TO_REGCLASS RC:$src2, VR128),
                (COPY_TO_REGCLASS RC:$src3, VR128))>;
+
+    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
+                (Op RC:$src2,
+                    (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
+                    (mem_frag addr:$src3)))))),
+              (!cast<Instruction>(Prefix#"213"#Suffix#"m_Int")
+               VR128:$src1, (COPY_TO_REGCLASS RC:$src2, VR128),
+               addr:$src3)>;
+
+    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
+                (Op (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
+                    (mem_frag addr:$src3), RC:$src2))))),
+              (!cast<Instruction>(Prefix#"132"#Suffix#"m_Int")
+               VR128:$src1, (COPY_TO_REGCLASS RC:$src2, VR128),
+               addr:$src3)>;
   }
 }
 
-defm : scalar_fma_patterns<X86Fmadd, "VFMADD", "SS", X86Movss, v4f32, f32, FR32>;
-defm : scalar_fma_patterns<X86Fmsub, "VFMSUB", "SS", X86Movss, v4f32, f32, FR32>;
-defm : scalar_fma_patterns<X86Fnmadd, "VFNMADD", "SS", X86Movss, v4f32, f32, FR32>;
-defm : scalar_fma_patterns<X86Fnmsub, "VFNMSUB", "SS", X86Movss, v4f32, f32, FR32>;
-
-defm : scalar_fma_patterns<X86Fmadd, "VFMADD", "SD", X86Movsd, v2f64, f64, FR64>;
-defm : scalar_fma_patterns<X86Fmsub, "VFMSUB", "SD", X86Movsd, v2f64, f64, FR64>;
-defm : scalar_fma_patterns<X86Fnmadd, "VFNMADD", "SD", X86Movsd, v2f64, f64, FR64>;
-defm : scalar_fma_patterns<X86Fnmsub, "VFNMSUB", "SD", X86Movsd, v2f64, f64, FR64>;
+defm : scalar_fma_patterns<X86Fmadd, "VFMADD", "SS", X86Movss, v4f32, f32, FR32, loadf32>;
+defm : scalar_fma_patterns<X86Fmsub, "VFMSUB", "SS", X86Movss, v4f32, f32, FR32, loadf32>;
+defm : scalar_fma_patterns<X86Fnmadd, "VFNMADD", "SS", X86Movss, v4f32, f32, FR32, loadf32>;
+defm : scalar_fma_patterns<X86Fnmsub, "VFNMSUB", "SS", X86Movss, v4f32, f32, FR32, loadf32>;
+
+defm : scalar_fma_patterns<X86Fmadd, "VFMADD", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
+defm : scalar_fma_patterns<X86Fmsub, "VFMSUB", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
+defm : scalar_fma_patterns<X86Fnmadd, "VFNMADD", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
+defm : scalar_fma_patterns<X86Fnmsub, "VFNMSUB", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
 
 //===----------------------------------------------------------------------===//
 // FMA4 - AMD 4 operand Fused Multiply-Add instructions
@@ -606,7 +586,7 @@ let ExeDomain = SSEPackedDouble in {
 }
 
 multiclass scalar_fma4_patterns<SDNode Op, string Name,
-                               SDNode Move, ValueType VT, ValueType EltVT,
+                               ValueType VT, ValueType EltVT,
                                RegisterClass RC, PatFrag mem_frag> {
   let Predicates = [HasFMA4] in {
     let AddedComplexity = 15 in
@@ -633,12 +613,12 @@ multiclass scalar_fma4_patterns<SDNode O
   }
 }
 
-defm : scalar_fma4_patterns<X86Fmadd, "VFMADDSS4", X86Movss, v4f32, f32, FR32, loadf32>;
-defm : scalar_fma4_patterns<X86Fmsub, "VFMSUBSS4", X86Movss, v4f32, f32, FR32, loadf32>;
-defm : scalar_fma4_patterns<X86Fnmadd, "VFNMADDSS4", X86Movss, v4f32, f32, FR32, loadf32>;
-defm : scalar_fma4_patterns<X86Fnmsub, "VFNMSUBSS4", X86Movss, v4f32, f32, FR32, loadf32>;
-
-defm : scalar_fma4_patterns<X86Fmadd, "VFMADDSD4", X86Movsd, v2f64, f64, FR64, loadf64>;
-defm : scalar_fma4_patterns<X86Fmsub, "VFMSUBSD4", X86Movsd, v2f64, f64, FR64, loadf64>;
-defm : scalar_fma4_patterns<X86Fnmadd, "VFNMADDSD4", X86Movsd, v2f64, f64, FR64, loadf64>;
-defm : scalar_fma4_patterns<X86Fnmsub, "VFNMSUBSD4", X86Movsd, v2f64, f64, FR64, loadf64>;
+defm : scalar_fma4_patterns<X86Fmadd, "VFMADDSS4", v4f32, f32, FR32, loadf32>;
+defm : scalar_fma4_patterns<X86Fmsub, "VFMSUBSS4", v4f32, f32, FR32, loadf32>;
+defm : scalar_fma4_patterns<X86Fnmadd, "VFNMADDSS4", v4f32, f32, FR32, loadf32>;
+defm : scalar_fma4_patterns<X86Fnmsub, "VFNMSUBSS4", v4f32, f32, FR32, loadf32>;
+
+defm : scalar_fma4_patterns<X86Fmadd, "VFMADDSD4", v2f64, f64, FR64, loadf64>;
+defm : scalar_fma4_patterns<X86Fmsub, "VFMSUBSD4", v2f64, f64, FR64, loadf64>;
+defm : scalar_fma4_patterns<X86Fnmadd, "VFNMADDSD4", v2f64, f64, FR64, loadf64>;
+defm : scalar_fma4_patterns<X86Fnmsub, "VFNMSUBSD4", v2f64, f64, FR64, loadf64>;




More information about the llvm-commits mailing list