[llvm] 2398752 - Add missing encoding comments from fma scalar folded intrinsics tests

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sat Feb 8 07:42:24 PST 2020


Author: Simon Pilgrim
Date: 2020-02-08T15:23:39Z
New Revision: 2398752f37c5c1a377912d3d6aa416d3b06a4f30

URL: https://github.com/llvm/llvm-project/commit/2398752f37c5c1a377912d3d6aa416d3b06a4f30
DIFF: https://github.com/llvm/llvm-project/commit/2398752f37c5c1a377912d3d6aa416d3b06a4f30.diff

LOG: Add missing encoding comments from fma scalar folded intrinsics tests
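
The test's NOTE line records that its assertions are autogenerated by utils/update_llc_test_checks.py, so the new encoding comments were presumably produced by adding -show-mc-encoding to the RUN lines and rerunning that script. A sketch of that regeneration step (an assumption, not taken from this commit's log):

    # Hypothetical regeneration command, assuming an in-tree checkout with a
    # built llc on PATH (otherwise pass --llc-binary). The script reads the
    # RUN lines and rewrites the CHECK blocks, including the
    # "# encoding: [...]" comments shown in the diff below.
    python llvm/utils/update_llc_test_checks.py \
        llvm/test/CodeGen/X86/fma-scalar-memfold.ll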

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/fma-scalar-memfold.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/fma-scalar-memfold.ll b/llvm/test/CodeGen/X86/fma-scalar-memfold.ll
index 0cdf27076642..06ef8293adfa 100644
--- a/llvm/test/CodeGen/X86/fma-scalar-memfold.ll
+++ b/llvm/test/CodeGen/X86/fma-scalar-memfold.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
-; RUN: llc < %s -disable-peephole -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512
+; RUN: llc < %s -disable-peephole -mcpu=core-avx2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: llc < %s -disable-peephole -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX512
 
 target triple = "x86_64-unknown-unknown"
 
@@ -15,12 +15,23 @@ declare <2 x double> @llvm.x86.fma.vfnmadd.sd(<2 x double>, <2 x double>, <2 x d
 declare <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double>, <2 x double>, <2 x double>)
 
 define void @fmadd_aab_ss(float* %a, float* %b) {
-; CHECK-LABEL: fmadd_aab_ss:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vfmadd213ss (%rsi), %xmm0, %xmm0
-; CHECK-NEXT:    vmovss %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fmadd_aab_ss:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfmadd213ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xa9,0x06]
+; AVX2-NEXT:    # xmm0 = (xmm0 * xmm0) + mem
+; AVX2-NEXT:    vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fmadd_aab_ss:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfmadd213ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0x06]
+; AVX512-NEXT:    # xmm0 = (xmm0 * xmm0) + mem
+; AVX512-NEXT:    vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -41,12 +52,23 @@ define void @fmadd_aab_ss(float* %a, float* %b) {
 }
 
 define void @fmadd_aba_ss(float* %a, float* %b) {
-; CHECK-LABEL: fmadd_aba_ss:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
-; CHECK-NEXT:    vmovss %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fmadd_aba_ss:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfmadd231ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xb9,0x06]
+; AVX2-NEXT:    # xmm0 = (xmm0 * mem) + xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fmadd_aba_ss:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfmadd231ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xb9,0x06]
+; AVX512-NEXT:    # xmm0 = (xmm0 * mem) + xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -67,12 +89,23 @@ define void @fmadd_aba_ss(float* %a, float* %b) {
 }
 
 define void @fmsub_aab_ss(float* %a, float* %b) {
-; CHECK-LABEL: fmsub_aab_ss:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vfmsub213ss (%rsi), %xmm0, %xmm0
-; CHECK-NEXT:    vmovss %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fmsub_aab_ss:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfmsub213ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xab,0x06]
+; AVX2-NEXT:    # xmm0 = (xmm0 * xmm0) - mem
+; AVX2-NEXT:    vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fmsub_aab_ss:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfmsub213ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xab,0x06]
+; AVX512-NEXT:    # xmm0 = (xmm0 * xmm0) - mem
+; AVX512-NEXT:    vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -93,12 +126,23 @@ define void @fmsub_aab_ss(float* %a, float* %b) {
 }
 
 define void @fmsub_aba_ss(float* %a, float* %b) {
-; CHECK-LABEL: fmsub_aba_ss:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm0 * mem) - xmm0
-; CHECK-NEXT:    vmovss %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fmsub_aba_ss:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfmsub231ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xbb,0x06]
+; AVX2-NEXT:    # xmm0 = (xmm0 * mem) - xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fmsub_aba_ss:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfmsub231ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xbb,0x06]
+; AVX512-NEXT:    # xmm0 = (xmm0 * mem) - xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -119,12 +163,23 @@ define void @fmsub_aba_ss(float* %a, float* %b) {
 }
 
 define void @fnmadd_aab_ss(float* %a, float* %b) {
-; CHECK-LABEL: fnmadd_aab_ss:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vfnmadd213ss (%rsi), %xmm0, %xmm0
-; CHECK-NEXT:    vmovss %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fnmadd_aab_ss:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfnmadd213ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xad,0x06]
+; AVX2-NEXT:    # xmm0 = -(xmm0 * xmm0) + mem
+; AVX2-NEXT:    vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fnmadd_aab_ss:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfnmadd213ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xad,0x06]
+; AVX512-NEXT:    # xmm0 = -(xmm0 * xmm0) + mem
+; AVX512-NEXT:    vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -145,12 +200,23 @@ define void @fnmadd_aab_ss(float* %a, float* %b) {
 }
 
 define void @fnmadd_aba_ss(float* %a, float* %b) {
-; CHECK-LABEL: fnmadd_aba_ss:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm0 * mem) + xmm0
-; CHECK-NEXT:    vmovss %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fnmadd_aba_ss:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfnmadd231ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xbd,0x06]
+; AVX2-NEXT:    # xmm0 = -(xmm0 * mem) + xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fnmadd_aba_ss:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfnmadd231ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xbd,0x06]
+; AVX512-NEXT:    # xmm0 = -(xmm0 * mem) + xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -171,12 +237,23 @@ define void @fnmadd_aba_ss(float* %a, float* %b) {
 }
 
 define void @fnmsub_aab_ss(float* %a, float* %b) {
-; CHECK-LABEL: fnmsub_aab_ss:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vfnmsub213ss (%rsi), %xmm0, %xmm0
-; CHECK-NEXT:    vmovss %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fnmsub_aab_ss:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfnmsub213ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xaf,0x06]
+; AVX2-NEXT:    # xmm0 = -(xmm0 * xmm0) - mem
+; AVX2-NEXT:    vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fnmsub_aab_ss:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfnmsub213ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaf,0x06]
+; AVX512-NEXT:    # xmm0 = -(xmm0 * xmm0) - mem
+; AVX512-NEXT:    vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -197,12 +274,23 @@ define void @fnmsub_aab_ss(float* %a, float* %b) {
 }
 
 define void @fnmsub_aba_ss(float* %a, float* %b) {
-; CHECK-LABEL: fnmsub_aba_ss:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm0 * mem) - xmm0
-; CHECK-NEXT:    vmovss %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fnmsub_aba_ss:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovss (%rdi), %xmm0 # encoding: [0xc5,0xfa,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vfnmsub231ss (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0xbf,0x06]
+; AVX2-NEXT:    # xmm0 = -(xmm0 * mem) - xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rdi) # encoding: [0xc5,0xfa,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fnmsub_aba_ss:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovss (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vfnmsub231ss (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xbf,0x06]
+; AVX512-NEXT:    # xmm0 = -(xmm0 * mem) - xmm0
+; AVX512-NEXT:    vmovss %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -223,12 +311,23 @@ define void @fnmsub_aba_ss(float* %a, float* %b) {
 }
 
 define void @fmadd_aab_sd(double* %a, double* %b) {
-; CHECK-LABEL: fmadd_aab_sd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm0 * xmm0) + mem
-; CHECK-NEXT:    vmovsd %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fmadd_aab_sd:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero
+; AVX2-NEXT:    vfmadd213sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xa9,0x06]
+; AVX2-NEXT:    # xmm0 = (xmm0 * xmm0) + mem
+; AVX2-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fmadd_aab_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero
+; AVX512-NEXT:    vfmadd213sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0x06]
+; AVX512-NEXT:    # xmm0 = (xmm0 * xmm0) + mem
+; AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -245,12 +344,23 @@ define void @fmadd_aab_sd(double* %a, double* %b) {
 }
 
 define void @fmadd_aba_sd(double* %a, double* %b) {
-; CHECK-LABEL: fmadd_aba_sd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
-; CHECK-NEXT:    vmovsd %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fmadd_aba_sd:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero
+; AVX2-NEXT:    vfmadd231sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xb9,0x06]
+; AVX2-NEXT:    # xmm0 = (xmm0 * mem) + xmm0
+; AVX2-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fmadd_aba_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero
+; AVX512-NEXT:    vfmadd231sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xb9,0x06]
+; AVX512-NEXT:    # xmm0 = (xmm0 * mem) + xmm0
+; AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -267,12 +377,23 @@ define void @fmadd_aba_sd(double* %a, double* %b) {
 }
 
 define void @fmsub_aab_sd(double* %a, double* %b) {
-; CHECK-LABEL: fmsub_aab_sd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm0 * xmm0) - mem
-; CHECK-NEXT:    vmovsd %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fmsub_aab_sd:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero
+; AVX2-NEXT:    vfmsub213sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xab,0x06]
+; AVX2-NEXT:    # xmm0 = (xmm0 * xmm0) - mem
+; AVX2-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fmsub_aab_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero
+; AVX512-NEXT:    vfmsub213sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xab,0x06]
+; AVX512-NEXT:    # xmm0 = (xmm0 * xmm0) - mem
+; AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -289,12 +410,23 @@ define void @fmsub_aab_sd(double* %a, double* %b) {
 }
 
 define void @fmsub_aba_sd(double* %a, double* %b) {
-; CHECK-LABEL: fmsub_aba_sd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm0 * mem) - xmm0
-; CHECK-NEXT:    vmovsd %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fmsub_aba_sd:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero
+; AVX2-NEXT:    vfmsub231sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xbb,0x06]
+; AVX2-NEXT:    # xmm0 = (xmm0 * mem) - xmm0
+; AVX2-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fmsub_aba_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero
+; AVX512-NEXT:    vfmsub231sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xbb,0x06]
+; AVX512-NEXT:    # xmm0 = (xmm0 * mem) - xmm0
+; AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -311,12 +443,23 @@ define void @fmsub_aba_sd(double* %a, double* %b) {
 }
 
 define void @fnmadd_aab_sd(double* %a, double* %b) {
-; CHECK-LABEL: fnmadd_aab_sd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm0 * xmm0) + mem
-; CHECK-NEXT:    vmovsd %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fnmadd_aab_sd:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero
+; AVX2-NEXT:    vfnmadd213sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xad,0x06]
+; AVX2-NEXT:    # xmm0 = -(xmm0 * xmm0) + mem
+; AVX2-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fnmadd_aab_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero
+; AVX512-NEXT:    vfnmadd213sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xad,0x06]
+; AVX512-NEXT:    # xmm0 = -(xmm0 * xmm0) + mem
+; AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -333,12 +476,23 @@ define void @fnmadd_aab_sd(double* %a, double* %b) {
 }
 
 define void @fnmadd_aba_sd(double* %a, double* %b) {
-; CHECK-LABEL: fnmadd_aba_sd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm0
-; CHECK-NEXT:    vmovsd %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fnmadd_aba_sd:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero
+; AVX2-NEXT:    vfnmadd231sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xbd,0x06]
+; AVX2-NEXT:    # xmm0 = -(xmm0 * mem) + xmm0
+; AVX2-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fnmadd_aba_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero
+; AVX512-NEXT:    vfnmadd231sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xbd,0x06]
+; AVX512-NEXT:    # xmm0 = -(xmm0 * mem) + xmm0
+; AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -355,12 +509,23 @@ define void @fnmadd_aba_sd(double* %a, double* %b) {
 }
 
 define void @fnmsub_aab_sd(double* %a, double* %b) {
-; CHECK-LABEL: fnmsub_aab_sd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm0 * xmm0) - mem
-; CHECK-NEXT:    vmovsd %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fnmsub_aab_sd:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero
+; AVX2-NEXT:    vfnmsub213sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xaf,0x06]
+; AVX2-NEXT:    # xmm0 = -(xmm0 * xmm0) - mem
+; AVX2-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fnmsub_aab_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero
+; AVX512-NEXT:    vfnmsub213sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaf,0x06]
+; AVX512-NEXT:    # xmm0 = -(xmm0 * xmm0) - mem
+; AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -377,12 +542,23 @@ define void @fnmsub_aab_sd(double* %a, double* %b) {
 }
 
 define void @fnmsub_aba_sd(double* %a, double* %b) {
-; CHECK-LABEL: fnmsub_aba_sd:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm0
-; CHECK-NEXT:    vmovsd %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; AVX2-LABEL: fnmsub_aba_sd:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovsd (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x10,0x07]
+; AVX2-NEXT:    # xmm0 = mem[0],zero
+; AVX2-NEXT:    vfnmsub231sd (%rsi), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0xbf,0x06]
+; AVX2-NEXT:    # xmm0 = -(xmm0 * mem) - xmm0
+; AVX2-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
+; AVX2-NEXT:    retq # encoding: [0xc3]
+;
+; AVX512-LABEL: fnmsub_aba_sd:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovsd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
+; AVX512-NEXT:    # xmm0 = mem[0],zero
+; AVX512-NEXT:    vfnmsub231sd (%rsi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xbf,0x06]
+; AVX512-NEXT:    # xmm0 = -(xmm0 * mem) - xmm0
+; AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
+; AVX512-NEXT:    retq # encoding: [0xc3]
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av  = insertelement <2 x double> %av0, double 0.000000e+00, i32 1