[llvm] r277118 - [AVX512] Add AVX512 run lines to some tests for scalar fma/add/sub/mul/div and regenerate. Follow-up commits will bring AVX512 code up to the same quality as AVX/SSE.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 28 23:05:58 PDT 2016


Author: ctopper
Date: Fri Jul 29 01:05:58 2016
New Revision: 277118

URL: http://llvm.org/viewvc/llvm-project?rev=277118&view=rev
Log:
[AVX512] Add AVX512 run lines to some tests for scalar fma/add/sub/mul/div and regenerate. Follow-up commits will bring AVX512 code up to the same quality as AVX/SSE.

Modified:
    llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll
    llvm/trunk/test/CodeGen/X86/fold-load-binops.ll
    llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll

Modified: llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll?rev=277118&r1=277117&r2=277118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-scalar-memfold.ll Fri Jul 29 01:05:58 2016
@@ -1,6 +1,8 @@
-; RUN: llc < %s -mtriple=x86_64-pc-win32 -mcpu=core-avx2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
+; RUN: llc < %s -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512
 
-attributes #0 = { nounwind }
+target triple = "x86_64-unknown-unknown"
 
 declare <4 x float> @llvm.x86.fma.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>)
 declare <4 x float> @llvm.x86.fma.vfmsub.ss(<4 x float>, <4 x float>, <4 x float>)
@@ -12,12 +14,21 @@ declare <2 x double> @llvm.x86.fma.vfmsu
 declare <2 x double> @llvm.x86.fma.vfnmadd.sd(<2 x double>, <2 x double>, <2 x double>)
 declare <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double>, <2 x double>, <2 x double>)
 
-define void @fmadd_aab_ss(float* %a, float* %b) #0 {
-; CHECK-LABEL: fmadd_aab_ss:
-; CHECK:      vmovss (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfmadd213ss (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fmadd_aab_ss(float* %a, float* %b) {
+; AVX2-LABEL: fmadd_aab_ss:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfmadd213ss (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fmadd_aab_ss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfmadd213ss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -37,12 +48,21 @@ define void @fmadd_aab_ss(float* %a, flo
   ret void
 }
 
-define void @fmadd_aba_ss(float* %a, float* %b) #0 {
-; CHECK-LABEL: fmadd_aba_ss:
-; CHECK:      vmovss (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfmadd132ss (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fmadd_aba_ss(float* %a, float* %b) {
+; AVX2-LABEL: fmadd_aba_ss:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfmadd132ss (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fmadd_aba_ss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfmadd213ss %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -62,12 +82,21 @@ define void @fmadd_aba_ss(float* %a, flo
   ret void
 }
 
-define void @fmsub_aab_ss(float* %a, float* %b) #0 {
-; CHECK-LABEL: fmsub_aab_ss:
-; CHECK:      vmovss (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfmsub213ss (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fmsub_aab_ss(float* %a, float* %b) {
+; AVX2-LABEL: fmsub_aab_ss:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfmsub213ss (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fmsub_aab_ss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfmsub213ss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -87,12 +116,21 @@ define void @fmsub_aab_ss(float* %a, flo
   ret void
 }
 
-define void @fmsub_aba_ss(float* %a, float* %b) #0 {
-; CHECK-LABEL: fmsub_aba_ss:
-; CHECK:      vmovss (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfmsub132ss (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fmsub_aba_ss(float* %a, float* %b) {
+; AVX2-LABEL: fmsub_aba_ss:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfmsub132ss (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fmsub_aba_ss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfmsub213ss %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -112,12 +150,21 @@ define void @fmsub_aba_ss(float* %a, flo
   ret void
 }
 
-define void @fnmadd_aab_ss(float* %a, float* %b) #0 {
-; CHECK-LABEL: fnmadd_aab_ss:
-; CHECK:      vmovss (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfnmadd213ss (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fnmadd_aab_ss(float* %a, float* %b) {
+; AVX2-LABEL: fnmadd_aab_ss:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfnmadd213ss (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fnmadd_aab_ss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfnmadd213ss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -137,12 +184,21 @@ define void @fnmadd_aab_ss(float* %a, fl
   ret void
 }
 
-define void @fnmadd_aba_ss(float* %a, float* %b) #0 {
-; CHECK-LABEL: fnmadd_aba_ss:
-; CHECK:      vmovss (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfnmadd132ss (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fnmadd_aba_ss(float* %a, float* %b) {
+; AVX2-LABEL: fnmadd_aba_ss:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfnmadd132ss (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fnmadd_aba_ss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfnmadd213ss %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -162,12 +218,21 @@ define void @fnmadd_aba_ss(float* %a, fl
   ret void
 }
 
-define void @fnmsub_aab_ss(float* %a, float* %b) #0 {
-; CHECK-LABEL: fnmsub_aab_ss:
-; CHECK:      vmovss (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfnmsub213ss (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fnmsub_aab_ss(float* %a, float* %b) {
+; AVX2-LABEL: fnmsub_aab_ss:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfnmsub213ss (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fnmsub_aab_ss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfnmsub213ss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -187,12 +252,21 @@ define void @fnmsub_aab_ss(float* %a, fl
   ret void
 }
 
-define void @fnmsub_aba_ss(float* %a, float* %b) #0 {
-; CHECK-LABEL: fnmsub_aba_ss:
-; CHECK:      vmovss (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfnmsub132ss (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fnmsub_aba_ss(float* %a, float* %b) {
+; AVX2-LABEL: fnmsub_aba_ss:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfnmsub132ss (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fnmsub_aba_ss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfnmsub213ss %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -212,12 +286,21 @@ define void @fnmsub_aba_ss(float* %a, fl
   ret void
 }
 
-define void @fmadd_aab_sd(double* %a, double* %b) #0 {
-; CHECK-LABEL: fmadd_aab_sd:
-; CHECK:      vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfmadd213sd (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovlpd %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fmadd_aab_sd(double* %a, double* %b) {
+; AVX2-LABEL: fmadd_aab_sd:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vfmadd213sd (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fmadd_aab_sd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vfmadd213sd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -233,12 +316,21 @@ define void @fmadd_aab_sd(double* %a, do
   ret void
 }
 
-define void @fmadd_aba_sd(double* %a, double* %b) #0 {
-; CHECK-LABEL: fmadd_aba_sd:
-; CHECK:      vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfmadd132sd (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovlpd %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fmadd_aba_sd(double* %a, double* %b) {
+; AVX2-LABEL: fmadd_aba_sd:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vfmadd132sd (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fmadd_aba_sd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vfmadd213sd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -254,12 +346,21 @@ define void @fmadd_aba_sd(double* %a, do
   ret void
 }
 
-define void @fmsub_aab_sd(double* %a, double* %b) #0 {
-; CHECK-LABEL: fmsub_aab_sd:
-; CHECK:      vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfmsub213sd (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovlpd %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fmsub_aab_sd(double* %a, double* %b) {
+; AVX2-LABEL: fmsub_aab_sd:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vfmsub213sd (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fmsub_aab_sd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vfmsub213sd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -275,12 +376,21 @@ define void @fmsub_aab_sd(double* %a, do
   ret void
 }
 
-define void @fmsub_aba_sd(double* %a, double* %b) #0 {
-; CHECK-LABEL: fmsub_aba_sd:
-; CHECK:      vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfmsub132sd (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovlpd %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fmsub_aba_sd(double* %a, double* %b) {
+; AVX2-LABEL: fmsub_aba_sd:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vfmsub132sd (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fmsub_aba_sd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vfmsub213sd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -296,12 +406,21 @@ define void @fmsub_aba_sd(double* %a, do
   ret void
 }
 
-define void @fnmadd_aab_sd(double* %a, double* %b) #0 {
-; CHECK-LABEL: fnmadd_aab_sd:
-; CHECK:      vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfnmadd213sd (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovlpd %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fnmadd_aab_sd(double* %a, double* %b) {
+; AVX2-LABEL: fnmadd_aab_sd:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vfnmadd213sd (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fnmadd_aab_sd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vfnmadd213sd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -317,12 +436,21 @@ define void @fnmadd_aab_sd(double* %a, d
   ret void
 }
 
-define void @fnmadd_aba_sd(double* %a, double* %b) #0 {
-; CHECK-LABEL: fnmadd_aba_sd:
-; CHECK:      vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfnmadd132sd (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovlpd %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fnmadd_aba_sd(double* %a, double* %b) {
+; AVX2-LABEL: fnmadd_aba_sd:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vfnmadd132sd (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fnmadd_aba_sd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vfnmadd213sd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -338,12 +466,21 @@ define void @fnmadd_aba_sd(double* %a, d
   ret void
 }
 
-define void @fnmsub_aab_sd(double* %a, double* %b) #0 {
-; CHECK-LABEL: fnmsub_aab_sd:
-; CHECK:      vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfnmsub213sd (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovlpd %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fnmsub_aab_sd(double* %a, double* %b) {
+; AVX2-LABEL: fnmsub_aab_sd:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vfnmsub213sd (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fnmsub_aab_sd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vfnmsub213sd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -359,12 +496,21 @@ define void @fnmsub_aab_sd(double* %a, d
   ret void
 }
 
-define void @fnmsub_aba_sd(double* %a, double* %b) #0 {
-; CHECK-LABEL: fnmsub_aba_sd:
-; CHECK:      vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
-; CHECK-NEXT: vfnmsub132sd (%rdx), %[[XMM]], %[[XMM]]
-; CHECK-NEXT: vmovlpd %[[XMM]], (%rcx)
-; CHECK-NEXT: ret
+define void @fnmsub_aba_sd(double* %a, double* %b) {
+; AVX2-LABEL: fnmsub_aba_sd:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vfnmsub132sd (%rsi), %xmm0, %xmm0
+; AVX2-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: fnmsub_aba_sd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vfnmsub213sd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vmovlpd %xmm0, (%rdi)
+; AVX512-NEXT:    retq
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1

Modified: llvm/trunk/test/CodeGen/X86/fold-load-binops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-load-binops.ll?rev=277118&r1=277117&r2=277118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-load-binops.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-load-binops.ll Fri Jul 29 01:05:58 2016
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx512f < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX512
 
 ; Verify that we're folding the load into the math instruction.
 ; This pattern is generated out of the simplest intrinsics usage:
@@ -12,10 +13,16 @@ define <4 x float> @addss(<4 x float> %v
 ; SSE-NEXT:    addss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: addss:
-; AVX:       # BB#0:
-; AVX-NEXT:    vaddss (%rdi), %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: addss:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vaddss (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: addss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
     %a = extractelement <4 x float> %va, i32 0
     %b = load float, float* %pb
     %r = fadd float %a, %b
@@ -29,10 +36,16 @@ define <2 x double> @addsd(<2 x double>
 ; SSE-NEXT:    addsd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: addsd:
-; AVX:       # BB#0:
-; AVX-NEXT:    vaddsd (%rdi), %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: addsd:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vaddsd (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: addsd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
     %a = extractelement <2 x double> %va, i32 0
     %b = load double, double* %pb
     %r = fadd double %a, %b
@@ -46,10 +59,16 @@ define <4 x float> @subss(<4 x float> %v
 ; SSE-NEXT:    subss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: subss:
-; AVX:       # BB#0:
-; AVX-NEXT:    vsubss (%rdi), %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: subss:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vsubss (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: subss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
     %a = extractelement <4 x float> %va, i32 0
     %b = load float, float* %pb
     %r = fsub float %a, %b
@@ -63,10 +82,16 @@ define <2 x double> @subsd(<2 x double>
 ; SSE-NEXT:    subsd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: subsd:
-; AVX:       # BB#0:
-; AVX-NEXT:    vsubsd (%rdi), %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: subsd:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vsubsd (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: subsd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
     %a = extractelement <2 x double> %va, i32 0
     %b = load double, double* %pb
     %r = fsub double %a, %b
@@ -80,10 +105,16 @@ define <4 x float> @mulss(<4 x float> %v
 ; SSE-NEXT:    mulss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: mulss:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmulss (%rdi), %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: mulss:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmulss (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: mulss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
     %a = extractelement <4 x float> %va, i32 0
     %b = load float, float* %pb
     %r = fmul float %a, %b
@@ -97,10 +128,16 @@ define <2 x double> @mulsd(<2 x double>
 ; SSE-NEXT:    mulsd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: mulsd:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmulsd (%rdi), %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: mulsd:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmulsd (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: mulsd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
     %a = extractelement <2 x double> %va, i32 0
     %b = load double, double* %pb
     %r = fmul double %a, %b
@@ -114,10 +151,16 @@ define <4 x float> @divss(<4 x float> %v
 ; SSE-NEXT:    divss (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: divss:
-; AVX:       # BB#0:
-; AVX-NEXT:    vdivss (%rdi), %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: divss:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vdivss (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: divss:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vdivss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
     %a = extractelement <4 x float> %va, i32 0
     %b = load float, float* %pb
     %r = fdiv float %a, %b
@@ -131,10 +174,16 @@ define <2 x double> @divsd(<2 x double>
 ; SSE-NEXT:    divsd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: divsd:
-; AVX:       # BB#0:
-; AVX-NEXT:    vdivsd (%rdi), %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: divsd:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vdivsd (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: divsd:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
     %a = extractelement <2 x double> %va, i32 0
     %b = load double, double* %pb
     %r = fdiv double %a, %b

Modified: llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll?rev=277118&r1=277117&r2=277118&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-scalar-fp-arith.ll Fri Jul 29 01:05:58 2016
@@ -1,6 +1,8 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck --check-prefix=SSE --check-prefix=SSE2 %s
 ; RUN: llc -mcpu=x86-64 -mattr=+sse4.1 < %s | FileCheck --check-prefix=SSE --check-prefix=SSE41 %s
-; RUN: llc -mcpu=x86-64 -mattr=+avx < %s | FileCheck --check-prefix=AVX %s
+; RUN: llc -mcpu=x86-64 -mattr=+avx < %s | FileCheck --check-prefix=AVX --check-prefix=AVX1 %s
+; RUN: llc -mcpu=x86-64 -mattr=+avx512f < %s | FileCheck --check-prefix=AVX --check-prefix=AVX512 %s
 
 target triple = "x86_64-unknown-unknown"
 
@@ -79,15 +81,15 @@ define <4 x float> @test_div_ss(<4 x flo
 define <4 x float> @test_sqrt_ss(<4 x float> %a) {
 ; SSE2-LABEL: test_sqrt_ss:
 ; SSE2:       # BB#0:
-; SSE2-NEXT:   sqrtss %xmm0, %xmm1
-; SSE2-NEXT:   movss %xmm1, %xmm0
-; SSE2-NEXT:   retq
+; SSE2-NEXT:    sqrtss %xmm0, %xmm1
+; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_sqrt_ss:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:  sqrtss %xmm0, %xmm1
-; SSE41-NEXT:  blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; SSE41-NEXT:  retq
+; SSE41-NEXT:    sqrtss %xmm0, %xmm1
+; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_sqrt_ss:
 ; AVX:       # BB#0:
@@ -173,13 +175,13 @@ define <2 x double> @test_sqrt_sd(<2 x d
 ; SSE-LABEL: test_sqrt_sd:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    sqrtsd %xmm0, %xmm1
-; SSE-NEXT:    movsd %xmm1, %xmm0
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_sqrt_sd:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm1
-; AVX-NEXT:    vmovsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = extractelement <2 x double> %a, i32 0
   %2 = call double @llvm.sqrt.f64(double %1)




More information about the llvm-commits mailing list