[llvm-commits] [llvm] r157804 - in /llvm/trunk: lib/Target/X86/X86InstrFMA.td test/CodeGen/X86/fma3.ll

Owen Anderson resistor at mac.com
Fri Jun 1 09:23:59 PDT 2012


Hi Craig,

You can go ahead and write patterns that match the fma SDNode today.  At the moment, that node is only generated when the user explicitly writes a call to fma(), so matching it won't break existing semantics.  Lang's FP_CONTRACT proposal involves introducing @llvm.fmuladd(), an optionally fused mul-add, which will be lowered out before SelectionDAG pattern matching.
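
For illustration only (not part of this commit), a pattern matching the fma SDNode directly might look something like the sketch below, reusing the existing VFMADDPDr231rY instruction definition from X86InstrFMA.td; the register classes and operand order are assumed to mirror the fadd(fmul) patterns being removed in the diff:

    // Hypothetical sketch: match the fma SDNode directly.
    // FMA231 semantics: src1 = src2*src3 + src1, i.e. fma(src2, src3, src1).
    def : Pat<(v4f64 (fma VR256:$src2, VR256:$src3, VR256:$src1)),
              (VFMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;

Since the fma node currently only arises from an explicit fma() call (i.e. @llvm.fma), a pattern like this would fire only where fusion is already requested, rather than contracting separate fmul/fadd operations.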

--Owen

On May 31, 2012, at 11:07 PM, Craig Topper <craig.topper at gmail.com> wrote:

> Author: ctopper
> Date: Fri Jun  1 01:07:48 2012
> New Revision: 157804
> 
> URL: http://llvm.org/viewvc/llvm-project?rev=157804&view=rev
> Log:
> Remove fadd(fmul) patterns for FMA3. This needs to be implemented by paying attention to FP_CONTRACT and matching @llvm.fma which is not available yet. This will allow us to enablle intrinsic use at least though.
> 
> Removed:
>    llvm/trunk/test/CodeGen/X86/fma3.ll
> Modified:
>    llvm/trunk/lib/Target/X86/X86InstrFMA.td
> 
> Modified: llvm/trunk/lib/Target/X86/X86InstrFMA.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrFMA.td?rev=157804&r1=157803&r2=157804&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86InstrFMA.td (original)
> +++ llvm/trunk/lib/Target/X86/X86InstrFMA.td Fri Jun  1 01:07:48 2012
> @@ -113,162 +113,6 @@
>     memopv4f64, int_x86_fma4_vfnmsub_pd, int_x86_fma4_vfnmsub_pd_256>, VEX_W;
> }
> 
> -let Predicates = [HasFMA3], AddedComplexity = 20 in {
> -//------------
> -// FP double precision ADD - 256
> -//------------
> -
> -// FMA231: src1 = src2*src3 + src1
> -def : Pat<(v4f64 (fadd (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
> -        (VFMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
> -
> -// FMA231: src1 = src2*src3 + src1
> -def : Pat<(v4f64 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
> -        (VFMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
> -
> -
> -//------------
> -// FP double precision ADD - 128
> -//------------
> -
> -
> -// FMA231: src1 = src2*src3 + src1
> -def : Pat<(v2f64 (fadd (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
> -            (VFMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
> -
> -// FMA231: src1 = src2*src3 + src1
> -def : Pat<(v2f64 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
> -            (VFMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
> -
> -//------------
> -// FP double precision SUB - 256
> -//------------
> -// FMA231: src1 = src2*src3 - src1
> -def : Pat<(v4f64 (fsub (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
> -           (VFMSUBPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
> -
> -// FMA231: src1 = src2*src3 - src1
> -def : Pat<(v4f64 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
> -            (VFMSUBPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
> -
> -
> -//------------
> -// FP double precision SUB - 128
> -//------------
> -
> -// FMA231: src1 = src2*src3 - src1
> -def : Pat<(v2f64 (fsub (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
> -            (VFMSUBPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
> -
> -// FMA231: src1 = src2*src3 - src1
> -def : Pat<(v2f64 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
> -            (VFMSUBPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
> -
> -//------------
> -// FP double precision FNMADD - 256
> -//------------
> -// FMA231: src1 = - src2*src3 + src1
> -def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, (memopv4f64 addr:$src3)))),
> -            (VFNMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
> -
> -// FMA231: src1 = - src2*src3 + src1
> -def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
> -            (VFNMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
> -
> -//------------
> -// FP double precision FNMADD - 128
> -//------------
> -
> -// FMA231: src1 = - src2*src3 + src1
> -def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, (memopv2f64 addr:$src3)))),
> -            (VFNMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
> -
> -// FMA231: src1 = - src2*src3 + src1
> -def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
> -            (VFNMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
> -
> -//------------
> -// FP single precision ADD - 256
> -//------------
> -
> -// FMA231: src1 = src2*src3 + src1
> -def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
> -            (VFMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
> -
> -// FMA213 : src1 = src2*src1 + src3
> -def : Pat<(v8f32 (fadd (fmul VR256:$src1, VR256:$src2), (memopv8f32 addr:$src3))),
> -            (VFMADDPSr213mY VR256:$src1, VR256:$src2, addr:$src3)>;
> -
> -// FMA231: src1 = src2*src3 + src1
> -def : Pat<(v8f32 (fadd (fmul (memopv8f32 addr:$src3), VR256:$src2), VR256:$src1)),
> -            (VFMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
> -
> -// FMA213: src1 = src2*src1 + src3
> -def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src1), VR256:$src3)),
> -            (VFMADDPSr213rY VR256:$src1, VR256:$src2, VR256:$src3)>;
> -
> -//------------
> -// FP single precision ADD - 128
> -//------------
> -
> -// FMA231 : src1 = src2*src3 + src1
> -def : Pat<(v4f32 (fadd (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
> -            (VFMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
> -
> -// FMA231 : src1 = src2*src3 + src1
> -def : Pat<(v4f32 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
> -        (VFMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
> -
> -//------------
> -// FP single precision SUB - 256
> -//------------
> -// FMA231: src1 = src2*src3 - src1
> -def : Pat<(v8f32 (fsub (fmul VR256:$src2, (memopv8f32 addr:$src3)), VR256:$src1)),
> -            (VFMSUBPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
> -
> -// FMA231: src1 = src2*src3 - src1
> -def : Pat<(v8f32 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
> -            (VFMSUBPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
> -
> -//------------
> -// FP single precision SUB - 128
> -//------------
> -// FMA231 : src1 = src2*src3 - src1
> -def : Pat<(v4f32 (fsub (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
> -            (VFMSUBPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
> -
> -// FMA231 : src1 = src2*src3 - src1
> -def : Pat<(v4f32 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
> -            (VFMSUBPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
> -
> -//------------
> -// FP single precision FNMADD - 256
> -//------------
> -// FMA231: src1 = - src2*src3 + src1
> -def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, (memopv8f32 addr:$src3)))),
> -            (VFNMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
> -
> -// FMA231: src1 = - src2*src3 + src1
> -def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
> -            (VFNMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
> -
> -//------------
> -// FP single precision FNMADD - 128
> -//------------
> -
> -// FMA231 : src1 = src2*src3 - src1
> -def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, (memopv4f32 addr:$src3)))),
> -            (VFNMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
> -
> -// FMA231 : src1 = src2*src3 - src1
> -def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
> -            (VFNMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
> -
> -} // HasFMA3
> -
> -//------------------------------
> -// SCALAR
> -//------------------------------
> 
> let Constraints = "$src1 = $dst" in {
> multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
> @@ -328,62 +172,6 @@
>                              int_x86_fma4_vfnmsub_sd>, VEX_W, VEX_LIG;
> 
> 
> -let Predicates = [HasFMA3], AddedComplexity = 20 in {
> -
> -//------------
> -// FP  scalar ADD
> -//------------
> -
> -
> -// FMADD231 : src1 = src2*src3 + src1
> -def : Pat<(f32 (fadd (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
> -            (VFMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
> -
> -def : Pat<(f32 (fadd (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
> -            (VFMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
> -
> -def : Pat<(f64 (fadd (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
> -            (VFMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
> -
> -def : Pat<(f64 (fadd (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
> -            (VFMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
> -
> -
> -
> -//------------
> -// FP  scalar SUB src2*src3 - src1
> -//------------
> -
> -def : Pat<(f32 (fsub (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
> -            (VFMSUBSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
> -
> -def : Pat<(f32 (fsub (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
> -            (VFMSUBSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
> -
> -def : Pat<(f64 (fsub (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
> -            (VFMSUBSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
> -
> -def : Pat<(f64 (fsub (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
> -            (VFMSUBSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
> -
> -//------------
> -// FP  scalar NADD src1 - src2*src3
> -//------------
> -
> -def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, FR32:$src3))),
> -            (VFNMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
> -
> -def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, (loadf32 addr:$src3)))),
> -            (VFNMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
> -
> -def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, FR64:$src3))),
> -            (VFNMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
> -
> -def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, (loadf64 addr:$src3)))),
> -            (VFNMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
> -
> -} // HasFMA3
> -
> //===----------------------------------------------------------------------===//
> // FMA4 - AMD 4 operand Fused Multiply-Add instructions
> //===----------------------------------------------------------------------===//
> 
> Removed: llvm/trunk/test/CodeGen/X86/fma3.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma3.ll?rev=157803&view=auto
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/fma3.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/fma3.ll (removed)
> @@ -1,66 +0,0 @@
> -; RUN: llc < %s -mtriple=x86_64-pc-win32 -mcpu=core-avx2 -mattr=avx2,+fma3 | FileCheck %s
> -
> -define <4 x float> @test_x86_fmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
> -  ; CHECK: fmadd231ps {{.*\(%r.*}}, %xmm
> -  %x = fmul <4 x float> %a0, %a1
> -  %res = fadd <4 x float> %x, %a2
> -  ret <4 x float> %res
> -}
> -
> -define <4 x float> @test_x86_fmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
> -  ; CHECK: fmsub231ps {{.*\(%r.*}}, %xmm
> -  %x = fmul <4 x float> %a0, %a1
> -  %res = fsub <4 x float> %x, %a2
> -  ret <4 x float> %res
> -}
> -
> -define <4 x float> @test_x86_fnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
> -  ; CHECK: fnmadd231ps {{.*\(%r.*}}, %xmm
> -  %x = fmul <4 x float> %a0, %a1
> -  %res = fsub <4 x float> %a2, %x
> -  ret <4 x float> %res
> -}
> -
> -define <8 x float> @test_x86_fmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
> -  ; CHECK: vfmadd213ps	{{.*\(%r.*}}, %ymm
> -  %x = fmul <8 x float> %a0, %a1
> -  %res = fadd <8 x float> %x, %a2
> -  ret <8 x float> %res
> -}
> -
> -define <4 x double> @test_x86_fmadd_pd_y(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
> -  ; CHECK: vfmadd231pd {{.*\(%r.*}}, %ymm
> -  %x = fmul <4 x double> %a0, %a1
> -  %res = fadd <4 x double> %x, %a2
> -  ret <4 x double> %res
> -}
> -
> -
> -define <8 x float> @test_x86_fmsub_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
> -  ; CHECK: fmsub231ps {{.*\(%r.*}}, %ymm
> -  %x = fmul <8 x float> %a0, %a1
> -  %res = fsub <8 x float> %x, %a2
> -  ret <8 x float> %res
> -}
> -
> -define <8 x float> @test_x86_fnmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
> -  ; CHECK: fnmadd231ps {{.*\(%r.*}}, %ymm
> -  %x = fmul <8 x float> %a0, %a1
> -  %res = fsub <8 x float> %a2, %x
> -  ret <8 x float> %res
> -}
> -
> -define float @test_x86_fnmadd_ss(float %a0, float %a1, float %a2) {
> -  ; CHECK: vfnmadd231ss    %xmm1, %xmm0, %xmm2
> -  %x = fmul float %a0, %a1
> -  %res = fsub float %a2, %x
> -  ret float %res
> -}
> -
> -define double @test_x86_fnmadd_sd(double %a0, double %a1, double %a2) {
> -  ; CHECK: vfnmadd231sd    %xmm1, %xmm0, %xmm2
> -  %x = fmul double %a0, %a1
> -  %res = fsub double %a2, %x
> -  ret double %res
> -}
> -
> 
> 


