[llvm-commits] [llvm] r147361 - in /llvm/trunk: lib/Target/X86/X86InstrFMA.td test/CodeGen/X86/fma4-intrinsics-x86_64.ll
Craig Topper
craig.topper at gmail.com
Thu Dec 29 18:18:37 PST 2011
Author: ctopper
Date: Thu Dec 29 20:18:36 2011
New Revision: 147361
URL: http://llvm.org/viewvc/llvm-project?rev=147361&view=rev
Log:
Change FMA4 memory forms to use memopv* instead of alignedloadv*. No need to force alignment on these instructions. Add a couple of test cases for the memory forms.
Modified:
llvm/trunk/lib/Target/X86/X86InstrFMA.td
llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64.ll
Modified: llvm/trunk/lib/Target/X86/X86InstrFMA.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrFMA.td?rev=147361&r1=147360&r2=147361&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrFMA.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrFMA.td Thu Dec 29 20:18:36 2011
@@ -192,38 +192,36 @@
def : Pat<(int_x86_fma4_vfmadd_ps VR128:$src1, VR128:$src2, VR128:$src3),
(VFMADDPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_ps VR128:$src1, VR128:$src2,
- (alignedloadv4f32 addr:$src3)),
+ (memopv4f32 addr:$src3)),
(VFMADDPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmadd_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfmadd_ps VR128:$src1, (memopv4f32 addr:$src2),
VR128:$src3),
(VFMADDPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_pd VR128:$src1, VR128:$src2, VR128:$src3),
(VFMADDPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_pd VR128:$src1, VR128:$src2,
- (alignedloadv2f64 addr:$src3)),
+ (memopv2f64 addr:$src3)),
(VFMADDPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmadd_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfmadd_pd VR128:$src1, (memopv2f64 addr:$src2),
VR128:$src3),
(VFMADDPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFMADDPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_ps_256 VR256:$src1, VR256:$src2,
- (alignedloadv8f32 addr:$src3)),
+ (memopv8f32 addr:$src3)),
(VFMADDPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmadd_ps_256 VR256:$src1,
- (alignedloadv8f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfmadd_ps_256 VR256:$src1, (memopv8f32 addr:$src2),
VR256:$src3),
(VFMADDPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFMADDPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmadd_pd_256 VR256:$src1, VR256:$src2,
- (alignedloadv4f64 addr:$src3)),
+ (memopv4f64 addr:$src3)),
(VFMADDPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmadd_pd_256 VR256:$src1,
- (alignedloadv4f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfmadd_pd_256 VR256:$src1, (memopv4f64 addr:$src2),
VR256:$src3),
(VFMADDPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
@@ -245,38 +243,36 @@
def : Pat<(int_x86_fma4_vfmsub_ps VR128:$src1, VR128:$src2, VR128:$src3),
(VFMSUBPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_ps VR128:$src1, VR128:$src2,
- (alignedloadv4f32 addr:$src3)),
+ (memopv4f32 addr:$src3)),
(VFMSUBPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmsub_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfmsub_ps VR128:$src1, (memopv4f32 addr:$src2),
VR128:$src3),
(VFMSUBPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_pd VR128:$src1, VR128:$src2, VR128:$src3),
(VFMSUBPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_pd VR128:$src1, VR128:$src2,
- (alignedloadv2f64 addr:$src3)),
+ (memopv2f64 addr:$src3)),
(VFMSUBPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmsub_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfmsub_pd VR128:$src1, (memopv2f64 addr:$src2),
VR128:$src3),
(VFMSUBPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFMSUBPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_ps_256 VR256:$src1, VR256:$src2,
- (alignedloadv8f32 addr:$src3)),
+ (memopv8f32 addr:$src3)),
(VFMSUBPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmsub_ps_256 VR256:$src1,
- (alignedloadv8f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfmsub_ps_256 VR256:$src1, (memopv8f32 addr:$src2),
VR256:$src3),
(VFMSUBPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFMSUBPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmsub_pd_256 VR256:$src1, VR256:$src2,
- (alignedloadv4f64 addr:$src3)),
+ (memopv4f64 addr:$src3)),
(VFMSUBPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmsub_pd_256 VR256:$src1,
- (alignedloadv4f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfmsub_pd_256 VR256:$src1, (memopv4f64 addr:$src2),
VR256:$src3),
(VFMSUBPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
@@ -298,38 +294,36 @@
def : Pat<(int_x86_fma4_vfnmadd_ps VR128:$src1, VR128:$src2, VR128:$src3),
(VFNMADDPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_ps VR128:$src1, VR128:$src2,
- (alignedloadv4f32 addr:$src3)),
+ (memopv4f32 addr:$src3)),
(VFNMADDPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfnmadd_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfnmadd_ps VR128:$src1, (memopv4f32 addr:$src2),
VR128:$src3),
(VFNMADDPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_pd VR128:$src1, VR128:$src2, VR128:$src3),
(VFNMADDPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_pd VR128:$src1, VR128:$src2,
- (alignedloadv2f64 addr:$src3)),
+ (memopv2f64 addr:$src3)),
(VFNMADDPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfnmadd_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfnmadd_pd VR128:$src1, (memopv2f64 addr:$src2),
VR128:$src3),
(VFNMADDPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFNMADDPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_ps_256 VR256:$src1, VR256:$src2,
- (alignedloadv8f32 addr:$src3)),
+ (memopv8f32 addr:$src3)),
(VFNMADDPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfnmadd_ps_256 VR256:$src1,
- (alignedloadv8f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfnmadd_ps_256 VR256:$src1, (memopv8f32 addr:$src2),
VR256:$src3),
(VFNMADDPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFNMADDPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfnmadd_pd_256 VR256:$src1, VR256:$src2,
- (alignedloadv4f64 addr:$src3)),
+ (memopv4f64 addr:$src3)),
(VFNMADDPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfnmadd_pd_256 VR256:$src1,
- (alignedloadv4f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfnmadd_pd_256 VR256:$src1, (memopv4f64 addr:$src2),
VR256:$src3),
(VFNMADDPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
@@ -351,38 +345,38 @@
def : Pat<(int_x86_fma4_vfnmsub_ps VR128:$src1, VR128:$src2, VR128:$src3),
(VFNMSUBPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_ps VR128:$src1, VR128:$src2,
- (alignedloadv4f32 addr:$src3)),
+ (memopv4f32 addr:$src3)),
(VFNMSUBPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfnmsub_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfnmsub_ps VR128:$src1, (memopv4f32 addr:$src2),
VR128:$src3),
(VFNMSUBPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_pd VR128:$src1, VR128:$src2, VR128:$src3),
(VFNMSUBPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_pd VR128:$src1, VR128:$src2,
- (alignedloadv2f64 addr:$src3)),
+ (memopv2f64 addr:$src3)),
(VFNMSUBPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfnmsub_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfnmsub_pd VR128:$src1, (memopv2f64 addr:$src2),
VR128:$src3),
(VFNMSUBPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFNMSUBPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_ps_256 VR256:$src1, VR256:$src2,
- (alignedloadv8f32 addr:$src3)),
+ (memopv8f32 addr:$src3)),
(VFNMSUBPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_ps_256 VR256:$src1,
- (alignedloadv8f32 addr:$src2),
+ (memopv8f32 addr:$src2),
VR256:$src3),
(VFNMSUBPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFNMSUBPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_pd_256 VR256:$src1, VR256:$src2,
- (alignedloadv4f64 addr:$src3)),
+ (memopv4f64 addr:$src3)),
(VFNMSUBPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
def : Pat<(int_x86_fma4_vfnmsub_pd_256 VR256:$src1,
- (alignedloadv4f64 addr:$src2),
+ (memopv4f64 addr:$src2),
VR256:$src3),
(VFNMSUBPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
@@ -390,38 +384,36 @@
def : Pat<(int_x86_fma4_vfmaddsub_ps VR128:$src1, VR128:$src2, VR128:$src3),
(VFMADDSUBPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_ps VR128:$src1, VR128:$src2,
- (alignedloadv4f32 addr:$src3)),
+ (memopv4f32 addr:$src3)),
(VFMADDSUBPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmaddsub_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfmaddsub_ps VR128:$src1, (memopv4f32 addr:$src2),
VR128:$src3),
(VFMADDSUBPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_pd VR128:$src1, VR128:$src2, VR128:$src3),
(VFMADDSUBPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_pd VR128:$src1, VR128:$src2,
- (alignedloadv2f64 addr:$src3)),
+ (memopv2f64 addr:$src3)),
(VFMADDSUBPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmaddsub_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfmaddsub_pd VR128:$src1, (memopv2f64 addr:$src2),
VR128:$src3),
(VFMADDSUBPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFMADDSUBPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_ps_256 VR256:$src1, VR256:$src2,
- (alignedloadv8f32 addr:$src3)),
+ (memopv8f32 addr:$src3)),
(VFMADDSUBPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmaddsub_ps_256 VR256:$src1,
- (alignedloadv8f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfmaddsub_ps_256 VR256:$src1, (memopv8f32 addr:$src2),
VR256:$src3),
(VFMADDSUBPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFMADDSUBPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmaddsub_pd_256 VR256:$src1, VR256:$src2,
- (alignedloadv4f64 addr:$src3)),
+ (memopv4f64 addr:$src3)),
(VFMADDSUBPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmaddsub_pd_256 VR256:$src1,
- (alignedloadv4f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfmaddsub_pd_256 VR256:$src1, (memopv4f64 addr:$src2),
VR256:$src3),
(VFMADDSUBPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
@@ -429,37 +421,35 @@
def : Pat<(int_x86_fma4_vfmsubadd_ps VR128:$src1, VR128:$src2, VR128:$src3),
(VFMSUBADDPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_ps VR128:$src1, VR128:$src2,
- (alignedloadv4f32 addr:$src3)),
+ (memopv4f32 addr:$src3)),
(VFMSUBADDPS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmsubadd_ps VR128:$src1, (alignedloadv4f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfmsubadd_ps VR128:$src1, (memopv4f32 addr:$src2),
VR128:$src3),
(VFMSUBADDPS4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_pd VR128:$src1, VR128:$src2, VR128:$src3),
(VFMSUBADDPD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_pd VR128:$src1, VR128:$src2,
- (alignedloadv2f64 addr:$src3)),
+ (memopv2f64 addr:$src3)),
(VFMSUBADDPD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmsubadd_pd VR128:$src1, (alignedloadv2f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfmsubadd_pd VR128:$src1, (memopv2f64 addr:$src2),
VR128:$src3),
(VFMSUBADDPD4mr VR128:$src1, addr:$src2, VR128:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_ps_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFMSUBADDPS4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_ps_256 VR256:$src1, VR256:$src2,
- (alignedloadv8f32 addr:$src3)),
+ (memopv8f32 addr:$src3)),
(VFMSUBADDPS4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmsubadd_ps_256 VR256:$src1,
- (alignedloadv8f32 addr:$src2),
+def : Pat<(int_x86_fma4_vfmsubadd_ps_256 VR256:$src1, (memopv8f32 addr:$src2),
VR256:$src3),
(VFMSUBADDPS4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_pd_256 VR256:$src1, VR256:$src2, VR256:$src3),
(VFMSUBADDPD4rrY VR256:$src1, VR256:$src2, VR256:$src3)>;
def : Pat<(int_x86_fma4_vfmsubadd_pd_256 VR256:$src1, VR256:$src2,
- (alignedloadv4f64 addr:$src3)),
+ (memopv4f64 addr:$src3)),
(VFMSUBADDPD4rmY VR256:$src1, VR256:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmsubadd_pd_256 VR256:$src1,
- (alignedloadv4f64 addr:$src2),
+def : Pat<(int_x86_fma4_vfmsubadd_pd_256 VR256:$src1, (memopv4f64 addr:$src2),
VR256:$src3),
(VFMSUBADDPD4mrY VR256:$src1, addr:$src2, VR256:$src3)>;
Modified: llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64.ll?rev=147361&r1=147360&r2=147361&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64.ll Thu Dec 29 20:18:36 2011
@@ -48,6 +48,18 @@
%res = call < 4 x float > @llvm.x86.fma4.vfmadd.ps(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %a2) ; <i64> [#uses=1]
ret < 4 x float > %res
}
+define < 4 x float > @test_x86_fma4_vfmadd_ps_load(< 4 x float > %a0, < 4 x float > %a1, < 4 x float >* %a2) {
+ ; CHECK: vfmaddps (%{{.*}})
+ %x = load <4 x float>* %a2
+ %res = call < 4 x float > @llvm.x86.fma4.vfmadd.ps(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %x) ; <i64> [#uses=1]
+ ret < 4 x float > %res
+}
+define < 4 x float > @test_x86_fma4_vfmadd_ps_load2(< 4 x float > %a0, < 4 x float >* %a1, < 4 x float > %a2) {
+ ; CHECK: vfmaddps %{{.*}}, (%{{.*}})
+ %x = load <4 x float>* %a1
+ %res = call < 4 x float > @llvm.x86.fma4.vfmadd.ps(< 4 x float > %a0, < 4 x float > %x, < 4 x float > %a2) ; <i64> [#uses=1]
+ ret < 4 x float > %res
+}
declare < 4 x float > @llvm.x86.fma4.vfmadd.ps(< 4 x float >, < 4 x float >, < 4 x float >) nounwind readnone
define < 2 x double > @test_x86_fma4_vfmadd_pd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %a2) {
@@ -55,6 +67,18 @@
%res = call < 2 x double > @llvm.x86.fma4.vfmadd.pd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %a2) ; <i64> [#uses=1]
ret < 2 x double > %res
}
+define < 2 x double > @test_x86_fma4_vfmadd_pd_load(< 2 x double > %a0, < 2 x double > %a1, < 2 x double >* %a2) {
+ ; CHECK: vfmaddpd (%{{.*}})
+ %x = load <2 x double>* %a2
+ %res = call < 2 x double > @llvm.x86.fma4.vfmadd.pd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %x) ; <i64> [#uses=1]
+ ret < 2 x double > %res
+}
+define < 2 x double > @test_x86_fma4_vfmadd_pd_load2(< 2 x double > %a0, < 2 x double >* %a1, < 2 x double > %a2) {
+ ; CHECK: vfmaddpd %{{.*}}, (%{{.*}})
+ %x = load <2 x double>* %a1
+ %res = call < 2 x double > @llvm.x86.fma4.vfmadd.pd(< 2 x double > %a0, < 2 x double > %x, < 2 x double > %a2) ; <i64> [#uses=1]
+ ret < 2 x double > %res
+}
declare < 2 x double > @llvm.x86.fma4.vfmadd.pd(< 2 x double >, < 2 x double >, < 2 x double >) nounwind readnone
define < 8 x float > @test_x86_fma4_vfmadd_ps_256(< 8 x float > %a0, < 8 x float > %a1, < 8 x float > %a2) {
More information about the llvm-commits
mailing list