[llvm] r362752 - [NFC][CodeGen] Add unary fneg tests to X86/fma4-intrinsics-x86.ll

Cameron McInally via llvm-commits <llvm-commits at lists.llvm.org>
Thu Jun 6 14:49:59 PDT 2019


Author: mcinally
Date: Thu Jun  6 14:49:59 2019
New Revision: 362752

URL: http://llvm.org/viewvc/llvm-project?rev=362752&view=rev
Log:
[NFC][CodeGen] Add unary fneg tests to X86/fma4-intrinsics-x86.ll
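
Each new *_unary_fneg test mirrors the existing test above it in the file,
which negates an operand with the legacy binary idiom (an fsub from -0.0).
LLVM IR now has a dedicated unary fneg instruction, and codegen should fold
it into FMA4 selection exactly as it folds the fsub form: vfmsub negates the
addend, vfnmadd negates a multiplicand, and vfnmsub negates both. As a
minimal sketch of the two equivalent negations in IR (v4f32 shown; the
binary form subtracts from a -0.0 splat):

    ; legacy idiom: subtract from negative zero
    %n0 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %x
    ; unary fneg: dedicated instruction, same semantics
    %n1 = fneg <4 x float> %x

Both forms should select the same instructions and encodings checked below.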

Modified:
    llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86.ll

Modified: llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86.ll?rev=362752&r1=362751&r2=362752&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86.ll Thu Jun  6 14:49:59 2019
@@ -88,6 +88,16 @@ define <4 x float> @test_x86_fma_vfmsub_
   ret <4 x float> %2
 }
 
+define <4 x float> @test_x86_fma_vfmsub_ps_unary_fneg(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmsub_ps_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmsubps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6c,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <4 x float> %a2
+  %2 = call <4 x float> @llvm.fma.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %1)
+  ret <4 x float> %2
+}
+
 define <2 x double> @test_x86_fma_vfmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsub_pd:
 ; CHECK:       # %bb.0:
@@ -98,6 +108,16 @@ define <2 x double> @test_x86_fma_vfmsub
   ret <2 x double> %2
 }
 
+define <2 x double> @test_x86_fma_vfmsub_pd_unary_fneg(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmsub_pd_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmsubpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x6d,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <2 x double> %a2
+  %2 = call <2 x double> @llvm.fma.v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %1)
+  ret <2 x double> %2
+}
+
 define <8 x float> @test_x86_fma_vfmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsub_ps_256:
 ; CHECK:       # %bb.0:
@@ -108,6 +128,16 @@ define <8 x float> @test_x86_fma_vfmsub_
   ret <8 x float> %2
 }
 
+define <8 x float> @test_x86_fma_vfmsub_ps_256_unary_fneg(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmsub_ps_256_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmsubps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x6c,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <8 x float> %a2
+  %2 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %1)
+  ret <8 x float> %2
+}
+
 define <4 x double> @test_x86_fma_vfmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsub_pd_256:
 ; CHECK:       # %bb.0:
@@ -118,6 +148,16 @@ define <4 x double> @test_x86_fma_vfmsub
   ret <4 x double> %2
 }
 
+define <4 x double> @test_x86_fma_vfmsub_pd_256_unary_fneg(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmsub_pd_256_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmsubpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x6d,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <4 x double> %a2
+  %2 = call <4 x double> @llvm.fma.v4f64(<4 x double> %a0, <4 x double> %a1, <4 x double> %1)
+  ret <4 x double> %2
+}
+
 ; VFNMADD
 define <4 x float> @test_x86_fma_vfnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmadd_ps:
@@ -129,6 +169,16 @@ define <4 x float> @test_x86_fma_vfnmadd
   ret <4 x float> %2
 }
 
+define <4 x float> @test_x86_fma_vfnmadd_ps_unary_fneg(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfnmadd_ps_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x78,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <4 x float> %a0
+  %2 = call <4 x float> @llvm.fma.v4f32(<4 x float> %1, <4 x float> %a1, <4 x float> %a2)
+  ret <4 x float> %2
+}
+
 define <2 x double> @test_x86_fma_vfnmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmadd_pd:
 ; CHECK:       # %bb.0:
@@ -139,6 +189,16 @@ define <2 x double> @test_x86_fma_vfnmad
   ret <2 x double> %2
 }
 
+define <2 x double> @test_x86_fma_vfnmadd_pd_unary_fneg(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfnmadd_pd_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmaddpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x79,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <2 x double> %a0
+  %2 = call <2 x double> @llvm.fma.v2f64(<2 x double> %1, <2 x double> %a1, <2 x double> %a2)
+  ret <2 x double> %2
+}
+
 define <8 x float> @test_x86_fma_vfnmadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmadd_ps_256:
 ; CHECK:       # %bb.0:
@@ -149,6 +209,16 @@ define <8 x float> @test_x86_fma_vfnmadd
   ret <8 x float> %2
 }
 
+define <8 x float> @test_x86_fma_vfnmadd_ps_256_unary_fneg(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfnmadd_ps_256_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmaddps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x78,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <8 x float> %a0
+  %2 = call <8 x float> @llvm.fma.v8f32(<8 x float> %1, <8 x float> %a1, <8 x float> %a2)
+  ret <8 x float> %2
+}
+
 define <4 x double> @test_x86_fma_vfnmadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmadd_pd_256:
 ; CHECK:       # %bb.0:
@@ -159,6 +229,16 @@ define <4 x double> @test_x86_fma_vfnmad
   ret <4 x double> %2
 }
 
+define <4 x double> @test_x86_fma_vfnmadd_pd_256_unary_fneg(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfnmadd_pd_256_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmaddpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x79,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <4 x double> %a0
+  %2 = call <4 x double> @llvm.fma.v4f64(<4 x double> %1, <4 x double> %a1, <4 x double> %a2)
+  ret <4 x double> %2
+}
+
 ; VFNMSUB
 define <4 x float> @test_x86_fma_vfnmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmsub_ps:
@@ -171,6 +251,17 @@ define <4 x float> @test_x86_fma_vfnmsub
   ret <4 x float> %3
 }
 
+define <4 x float> @test_x86_fma_vfnmsub_ps_unary_fneg(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfnmsub_ps_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmsubps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x7c,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <4 x float> %a0
+  %2 = fneg <4 x float> %a2
+  %3 = call <4 x float> @llvm.fma.v4f32(<4 x float> %1, <4 x float> %a1, <4 x float> %2)
+  ret <4 x float> %3
+}
+
 define <2 x double> @test_x86_fma_vfnmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmsub_pd:
 ; CHECK:       # %bb.0:
@@ -182,6 +273,17 @@ define <2 x double> @test_x86_fma_vfnmsu
   ret <2 x double> %3
 }
 
+define <2 x double> @test_x86_fma_vfnmsub_pd_unary_fneg(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfnmsub_pd_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmsubpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x7d,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <2 x double> %a0
+  %2 = fneg <2 x double> %a2
+  %3 = call <2 x double> @llvm.fma.v2f64(<2 x double> %1, <2 x double> %a1, <2 x double> %2)
+  ret <2 x double> %3
+}
+
 define <8 x float> @test_x86_fma_vfnmsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmsub_ps_256:
 ; CHECK:       # %bb.0:
@@ -193,6 +295,17 @@ define <8 x float> @test_x86_fma_vfnmsub
   ret <8 x float> %3
 }
 
+define <8 x float> @test_x86_fma_vfnmsub_ps_256_unary_fneg(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfnmsub_ps_256_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmsubps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x7c,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <8 x float> %a0
+  %2 = fneg <8 x float> %a2
+  %3 = call <8 x float> @llvm.fma.v8f32(<8 x float> %1, <8 x float> %a1, <8 x float> %2)
+  ret <8 x float> %3
+}
+
 define <4 x double> @test_x86_fma_vfnmsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfnmsub_pd_256:
 ; CHECK:       # %bb.0:
@@ -204,6 +317,17 @@ define <4 x double> @test_x86_fma_vfnmsu
   ret <4 x double> %3
 }
 
+define <4 x double> @test_x86_fma_vfnmsub_pd_256_unary_fneg(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfnmsub_pd_256_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmsubpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x7d,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = fneg <4 x double> %a0
+  %2 = fneg <4 x double> %a2
+  %3 = call <4 x double> @llvm.fma.v4f64(<4 x double> %1, <4 x double> %a1, <4 x double> %2)
+  ret <4 x double> %3
+}
+
 ; VFMADDSUB
 define <4 x float> @test_x86_fma_vfmaddsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmaddsub_ps:
@@ -217,6 +341,18 @@ define <4 x float> @test_x86_fma_vfmadds
   ret <4 x float> %4
 }
 
+define <4 x float> @test_x86_fma_vfmaddsub_ps_unary_fneg(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmaddsub_ps_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmaddsubps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5c,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x float> @llvm.fma.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
+  %2 = fneg <4 x float> %a2
+  %3 = call <4 x float> @llvm.fma.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %2)
+  %4 = shufflevector <4 x float> %3, <4 x float> %1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  ret <4 x float> %4
+}
+
 define <2 x double> @test_x86_fma_vfmaddsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmaddsub_pd:
 ; CHECK:       # %bb.0:
@@ -229,6 +365,18 @@ define <2 x double> @test_x86_fma_vfmadd
   ret <2 x double> %4
 }
 
+define <2 x double> @test_x86_fma_vfmaddsub_pd_unary_fneg(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmaddsub_pd_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmaddsubpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5d,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = call <2 x double> @llvm.fma.v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
+  %2 = fneg <2 x double> %a2
+  %3 = call <2 x double> @llvm.fma.v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %2)
+  %4 = shufflevector <2 x double> %3, <2 x double> %1, <2 x i32> <i32 0, i32 3>
+  ret <2 x double> %4
+}
+
 define <8 x float> @test_x86_fma_vfmaddsub_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmaddsub_ps_256:
 ; CHECK:       # %bb.0:
@@ -241,6 +389,18 @@ define <8 x float> @test_x86_fma_vfmadds
   ret <8 x float> %4
 }
 
+define <8 x float> @test_x86_fma_vfmaddsub_ps_256_unary_fneg(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmaddsub_ps_256_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmaddsubps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5c,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
+  %2 = fneg <8 x float> %a2
+  %3 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %2)
+  %4 = shufflevector <8 x float> %3, <8 x float> %1, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+  ret <8 x float> %4
+}
+
 define <4 x double> @test_x86_fma_vfmaddsub_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmaddsub_pd_256:
 ; CHECK:       # %bb.0:
@@ -253,6 +413,18 @@ define <4 x double> @test_x86_fma_vfmadd
   ret <4 x double> %4
 }
 
+define <4 x double> @test_x86_fma_vfmaddsub_pd_256_unary_fneg(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmaddsub_pd_256_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmaddsubpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5d,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x double> @llvm.fma.v4f64(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
+  %2 = fneg <4 x double> %a2
+  %3 = call <4 x double> @llvm.fma.v4f64(<4 x double> %a0, <4 x double> %a1, <4 x double> %2)
+  %4 = shufflevector <4 x double> %3, <4 x double> %1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  ret <4 x double> %4
+}
+
 ; VFMSUBADD
 define <4 x float> @test_x86_fma_vfmsubadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsubadd_ps:
@@ -266,6 +438,18 @@ define <4 x float> @test_x86_fma_vfmsuba
   ret <4 x float> %4
 }
 
+define <4 x float> @test_x86_fma_vfmsubadd_ps_unary_fneg(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmsubadd_ps_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmsubaddps %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5e,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x float> @llvm.fma.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
+  %2 = fneg <4 x float> %a2
+  %3 = call <4 x float> @llvm.fma.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %2)
+  %4 = shufflevector <4 x float> %1, <4 x float> %3, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  ret <4 x float> %4
+}
+
 define <2 x double> @test_x86_fma_vfmsubadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsubadd_pd:
 ; CHECK:       # %bb.0:
@@ -278,6 +462,19 @@ define <2 x double> @test_x86_fma_vfmsub
   ret <2 x double> %4
 }
 
+define <2 x double> @test_x86_fma_vfmsubadd_pd_unary_fneg(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmsubadd_pd_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmsubaddpd %xmm2, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0xf9,0x5f,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = call <2 x double> @llvm.fma.v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
+  %2 = fneg <2 x double> %a2
+  %3 = call <2 x double> @llvm.fma.v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %2)
+  %4 = shufflevector <2 x double> %1, <2 x double> %3, <2 x i32> <i32 0, i32 3>
+  ret <2 x double> %4
+}
+
+
 define <8 x float> @test_x86_fma_vfmsubadd_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsubadd_ps_256:
 ; CHECK:       # %bb.0:
@@ -290,6 +487,18 @@ define <8 x float> @test_x86_fma_vfmsuba
   ret <8 x float> %4
 }
 
+define <8 x float> @test_x86_fma_vfmsubadd_ps_256_unary_fneg(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmsubadd_ps_256_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmsubaddps %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5e,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
+  %2 = fneg <8 x float> %a2
+  %3 = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %2)
+  %4 = shufflevector <8 x float> %1, <8 x float> %3, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+  ret <8 x float> %4
+}
+
 define <4 x double> @test_x86_fma_vfmsubadd_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: test_x86_fma_vfmsubadd_pd_256:
 ; CHECK:       # %bb.0:
@@ -300,6 +509,18 @@ define <4 x double> @test_x86_fma_vfmsub
   %3 = call <4 x double> @llvm.fma.v4f64(<4 x double> %a0, <4 x double> %a1, <4 x double> %2)
   %4 = shufflevector <4 x double> %1, <4 x double> %3, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
   ret <4 x double> %4
+}
+
+define <4 x double> @test_x86_fma_vfmsubadd_pd_256_unary_fneg(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
+; CHECK-LABEL: test_x86_fma_vfmsubadd_pd_256_unary_fneg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmsubaddpd %ymm2, %ymm1, %ymm0, %ymm0 # encoding: [0xc4,0xe3,0xfd,0x5f,0xc2,0x10]
+; CHECK-NEXT:    retq # encoding: [0xc3]
+  %1 = call <4 x double> @llvm.fma.v4f64(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
+  %2 = fneg <4 x double> %a2
+  %3 = call <4 x double> @llvm.fma.v4f64(<4 x double> %a0, <4 x double> %a1, <4 x double> %2)
+  %4 = shufflevector <4 x double> %1, <4 x double> %3, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  ret <4 x double> %4
 }
 
 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) #2
