[llvm] r288008 - [X86][FMA4] Add test cases to demonstrate missed folding opportunities for FMA4 scalar intrinsics.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Nov 27 13:36:58 PST 2016
Author: ctopper
Date: Sun Nov 27 15:36:58 2016
New Revision: 288008
URL: http://llvm.org/viewvc/llvm-project?rev=288008&view=rev
Log:
[X86][FMA4] Add test cases to demonstrate missed folding opportunities for FMA4 scalar intrinsics.
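
The FMA3 runs below already fold the scalar load into the arithmetic instruction
(e.g. vfmadd213ss (%rsi), %xmm0, %xmm0), while the new FMA4 checks show a
separate vmovss load followed by a register-only vfmaddss. As a rough sketch of
the fold these tests are meant to expose (assuming the same (%rsi) source
operand the FMA3 checks fold), the ideal FMA4 output for fmadd_aab_ss would be:

        vfmaddss (%rsi), %xmm0, %xmm0, %xmm0   # load of b folded into the addend operand
        vmovss %xmm0, (%rdi)
        retq

Since FMA4's four-operand encoding permits a memory operand in either of the
last two source positions (selected by VEX.W), both the aab and aba patterns
should be foldable, and the vfmaddsd/vfmsubss/vfnmaddss/etc. cases below follow
the same shape.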
Modified:
llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll
Modified: llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll?rev=288008&r1=288007&r2=288008&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll Sun Nov 27 15:36:58 2016
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
; RUN: llc < %s -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512
+; RUN: llc < %s -mattr=fma4 | FileCheck %s --check-prefix=FMA4
target triple = "x86_64-unknown-unknown"
@@ -21,6 +22,14 @@ define void @fmadd_aab_ss(float* %a, flo
; CHECK-NEXT: vfmadd213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fmadd_aab_ss:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA4-NEXT: vfmaddss %xmm1, %xmm0, %xmm0, %xmm0
+; FMA4-NEXT: vmovss %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -47,6 +56,14 @@ define void @fmadd_aba_ss(float* %a, flo
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fmadd_aba_ss:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA4-NEXT: vfmaddss %xmm0, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovss %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -73,6 +90,14 @@ define void @fmsub_aab_ss(float* %a, flo
; CHECK-NEXT: vfmsub213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fmsub_aab_ss:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA4-NEXT: vfmsubss %xmm1, %xmm0, %xmm0, %xmm0
+; FMA4-NEXT: vmovss %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -99,6 +124,14 @@ define void @fmsub_aba_ss(float* %a, flo
; CHECK-NEXT: vfmsub132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fmsub_aba_ss:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA4-NEXT: vfmsubss %xmm0, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovss %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -125,6 +158,14 @@ define void @fnmadd_aab_ss(float* %a, fl
; CHECK-NEXT: vfnmadd213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fnmadd_aab_ss:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA4-NEXT: vfnmaddss %xmm1, %xmm0, %xmm0, %xmm0
+; FMA4-NEXT: vmovss %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -151,6 +192,14 @@ define void @fnmadd_aba_ss(float* %a, fl
; CHECK-NEXT: vfnmadd132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fnmadd_aba_ss:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA4-NEXT: vfnmaddss %xmm0, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovss %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -177,6 +226,14 @@ define void @fnmsub_aab_ss(float* %a, fl
; CHECK-NEXT: vfnmsub213ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fnmsub_aab_ss:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA4-NEXT: vfnmsubss %xmm1, %xmm0, %xmm0, %xmm0
+; FMA4-NEXT: vmovss %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -203,6 +260,14 @@ define void @fnmsub_aba_ss(float* %a, fl
; CHECK-NEXT: vfnmsub132ss (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovss %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fnmsub_aba_ss:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FMA4-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FMA4-NEXT: vfnmsubss %xmm0, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovss %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -229,6 +294,14 @@ define void @fmadd_aab_sd(double* %a, do
; CHECK-NEXT: vfmadd213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fmadd_aab_sd:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMA4-NEXT: vfmaddsd %xmm1, %xmm0, %xmm0, %xmm0
+; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -251,6 +324,14 @@ define void @fmadd_aba_sd(double* %a, do
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fmadd_aba_sd:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMA4-NEXT: vfmaddsd %xmm0, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -273,6 +354,14 @@ define void @fmsub_aab_sd(double* %a, do
; CHECK-NEXT: vfmsub213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fmsub_aab_sd:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMA4-NEXT: vfmsubsd %xmm1, %xmm0, %xmm0, %xmm0
+; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -295,6 +384,14 @@ define void @fmsub_aba_sd(double* %a, do
; CHECK-NEXT: vfmsub132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fmsub_aba_sd:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMA4-NEXT: vfmsubsd %xmm0, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -317,6 +414,14 @@ define void @fnmadd_aab_sd(double* %a, d
; CHECK-NEXT: vfnmadd213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fnmadd_aab_sd:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMA4-NEXT: vfnmaddsd %xmm1, %xmm0, %xmm0, %xmm0
+; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -339,6 +444,14 @@ define void @fnmadd_aba_sd(double* %a, d
; CHECK-NEXT: vfnmadd132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fnmadd_aba_sd:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMA4-NEXT: vfnmaddsd %xmm0, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -361,6 +474,14 @@ define void @fnmsub_aab_sd(double* %a, d
; CHECK-NEXT: vfnmsub213sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fnmsub_aab_sd:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMA4-NEXT: vfnmsubsd %xmm1, %xmm0, %xmm0, %xmm0
+; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -383,6 +504,14 @@ define void @fnmsub_aba_sd(double* %a, d
; CHECK-NEXT: vfnmsub132sd (%rsi), %xmm0, %xmm0
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
; CHECK-NEXT: retq
+;
+; FMA4-LABEL: fnmsub_aba_sd:
+; FMA4: # BB#0:
+; FMA4-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FMA4-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; FMA4-NEXT: vfnmsubsd %xmm0, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vmovlpd %xmm0, (%rdi)
+; FMA4-NEXT: retq
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
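
To reproduce the unfolded FMA4 output by hand, the new RUN line can be invoked
directly (a sketch; substitute the path to your own checkout and llc build):

        llc < test/CodeGen/X86/fma-scalar-memfold.ll -mattr=fma4

Once the folding is implemented, the FMA4 assertions can be regenerated with
utils/update_llc_test_checks.py, per the NOTE at the top of the test.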