[llvm] 0bf6055 - [X86] Add a fneg test for fma with a splat constant vector

Phoebe Wang via llvm-commits llvm-commits at lists.llvm.org
Wed May 17 00:21:40 PDT 2023


Author: Evgenii Kudriashov
Date: 2023-05-17T15:21:34+08:00
New Revision: 0bf6055e49941d72ef74b7e8988c66a9c91529f0

URL: https://github.com/llvm/llvm-project/commit/0bf6055e49941d72ef74b7e8988c66a9c91529f0
DIFF: https://github.com/llvm/llvm-project/commit/0bf6055e49941d72ef74b7e8988c66a9c91529f0.diff

LOG: [X86] Add a fneg test for fma with a splat constant vector

Differential Revision: https://reviews.llvm.org/D147017
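
These are baseline tests for the fneg/FMA constant combine: in fma(a, C, -C), where C is a splat constant vector, the addend is just the negation of the multiplicand constant, so the backend should be able to materialize a single broadcast and fold the sign flip into the FMA opcode instead of loading both constants. Note that the checked output for test9 below still broadcasts both splats, so these tests pin down the current codegen, presumably so the review above can show the improvement. A minimal standalone reproducer of the splat case (a sketch using the same intrinsic as the new tests; not part of this commit):

    ; fma(a, splat(0.5), splat(-0.5)): the addend is the negated
    ; multiplicand constant, so one broadcast plus a negated-operand
    ; FMA form would suffice.
    declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)

    define <4 x double> @fma_splat_neg_splat(<4 x double> %a) {
      %r = call <4 x double> @llvm.fma.v4f64(<4 x double> %a,
             <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>,
             <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>)
      ret <4 x double> %r
    }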

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/avx2-fma-fneg-combine.ll
    llvm/test/CodeGen/X86/fma-fneg-combine-2.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/avx2-fma-fneg-combine.ll b/llvm/test/CodeGen/X86/avx2-fma-fneg-combine.ll
index 33fd6abecd5b..9969734c97e9 100644
--- a/llvm/test/CodeGen/X86/avx2-fma-fneg-combine.ll
+++ b/llvm/test/CodeGen/X86/avx2-fma-fneg-combine.ll
@@ -6,6 +6,7 @@ declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
 declare float @llvm.fma.f32(float, float, float)
 declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
+declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
 
 ; This test checks combinations of FNEG and FMA intrinsics
 
@@ -148,3 +149,88 @@ define <8 x float> @test8(float %a, <8 x float> %b, <8 x float> %c)  {
   %t3 = tail call nsz <8 x float> @llvm.fma.v8f32(<8 x float> %t2, <8 x float> %b, <8 x float> %c)
   ret <8 x float> %t3
 }
+
+define <4 x double> @test9(<4 x double> %a) {
+; X32-LABEL: test9:
+; X32:       # %bb.0:
+; X32-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
+; X32-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [5.0E-1,5.0E-1,5.0E-1,5.0E-1]
+; X32-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm2 * ymm0) + ymm1
+; X32-NEXT:    retl
+;
+; X64-LABEL: test9:
+; X64:       # %bb.0:
+; X64-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
+; X64-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [5.0E-1,5.0E-1,5.0E-1,5.0E-1]
+; X64-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm2 * ymm0) + ymm1
+; X64-NEXT:    retq
+  %t = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>, <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>)
+  ret <4 x double> %t
+}
+
+define <4 x double> @test10(<4 x double> %a, <4 x double> %b) {
+; X32-LABEL: test10:
+; X32:       # %bb.0:
+; X32-NEXT:    vmovapd {{.*#+}} ymm2 = <-9.5E+0,u,-5.5E+0,-2.5E+0>
+; X32-NEXT:    vfmadd213pd {{.*#+}} ymm2 = (ymm0 * ymm2) + ymm1
+; X32-NEXT:    vfmadd231pd {{.*#+}} ymm1 = (ymm0 * mem) + ymm1
+; X32-NEXT:    vaddpd %ymm1, %ymm2, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test10:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovapd {{.*#+}} ymm2 = <-9.5E+0,u,-5.5E+0,-2.5E+0>
+; X64-NEXT:    vfmadd213pd {{.*#+}} ymm2 = (ymm0 * ymm2) + ymm1
+; X64-NEXT:    vfmadd231pd {{.*#+}} ymm1 = (ymm0 * mem) + ymm1
+; X64-NEXT:    vaddpd %ymm1, %ymm2, %ymm0
+; X64-NEXT:    retq
+  %t0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double -95.00000e-01, double undef, double -55.00000e-01, double -25.00000e-01>, <4 x double> %b)
+  %t1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double 95.00000e-01, double undef, double 55.00000e-01, double 25.00000e-01>, <4 x double> %b)
+  %t2 = fadd <4 x double> %t0, %t1
+  ret <4 x double> %t2
+}
+
+define <4 x double> @test11(<4 x double> %a) {
+; X32-LABEL: test11:
+; X32:       # %bb.0:
+; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [5.0E-1,2.5E+0,5.0E-1,2.5E+0]
+; X32-NEXT:    # ymm1 = mem[0,1,0,1]
+; X32-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; X32-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
+; X32-NEXT:    retl
+;
+; X64-LABEL: test11:
+; X64:       # %bb.0:
+; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [5.0E-1,2.5E+0,5.0E-1,2.5E+0]
+; X64-NEXT:    # ymm1 = mem[0,1,0,1]
+; X64-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; X64-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
+; X64-NEXT:    retq
+  %t0 = fadd <4 x double> %a, <double 5.000000e-01, double 25.00000e-01, double 5.000000e-01, double 25.00000e-01>
+  %t1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %t0, <4 x double> <double 5.000000e-01, double 25.00000e-01, double 5.000000e-01, double 25.00000e-01>, <4 x double> <double -5.000000e-01, double -25.00000e-01, double -5.000000e-01, double -25.00000e-01>)
+  ret <4 x double> %t1
+}
+
+define <4 x double> @test12(<4 x double> %a, <4 x double> %b) {
+; X32-LABEL: test12:
+; X32:       # %bb.0:
+; X32-NEXT:    vmovapd {{.*#+}} ymm2 = [7.5E+0,2.5E+0,5.5E+0,9.5E+0]
+; X32-NEXT:    vfmadd213pd {{.*#+}} ymm2 = (ymm0 * ymm2) + mem
+; X32-NEXT:    vmovapd {{.*#+}} ymm0 = <u,2.5E+0,5.5E+0,9.5E+0>
+; X32-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
+; X32-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test12:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovapd {{.*#+}} ymm2 = [7.5E+0,2.5E+0,5.5E+0,9.5E+0]
+; X64-NEXT:    vfmadd213pd {{.*#+}} ymm2 = (ymm0 * ymm2) + mem
+; X64-NEXT:    vmovapd {{.*#+}} ymm0 = <u,2.5E+0,5.5E+0,9.5E+0>
+; X64-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
+; X64-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
+; X64-NEXT:    retq
+  %t0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double 75.00000e-01, double 25.00000e-01, double 55.00000e-01, double 95.00000e-01>, <4 x double> <double -75.00000e-01, double undef, double -55.00000e-01, double -95.00000e-01>)
+  %t1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %b, <4 x double> <double undef, double 25.00000e-01, double 55.00000e-01, double 95.00000e-01>, <4 x double> <double -75.00000e-01, double -25.00000e-01, double -55.00000e-01, double -95.00000e-01>)
+  %t2 = fadd <4 x double> %t0, %t1
+  ret <4 x double> %t2
+}
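
The second file below duplicates the splat/undef patterns for the FMA3 and FMA4 instruction sets. The comments printed in the checks make the operand conventions visible: FMA3 forms such as vfmadd213pd are destructive, with the destination doubling as a source (ymm2 = (ymm0 * ymm2) + ymm1), while FMA4's vfmaddpd takes a separate destination, so both inputs can stay live. A hedged side-by-side sketch in AT&T syntax, with the register roles taken from those printed comments:

    # FMA3: 3-operand destructive form; the destination is also a source.
    vfmadd213pd %ymm1, %ymm0, %ymm2          # ymm2 = (ymm0 * ymm2) + ymm1
    # FMA4: 4-operand non-destructive form; separate destination register.
    vfmaddpd    %ymm1, %ymm2, %ymm0, %ymm3   # ymm3 = (ymm0 * ymm2) + ymm1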

diff --git a/llvm/test/CodeGen/X86/fma-fneg-combine-2.ll b/llvm/test/CodeGen/X86/fma-fneg-combine-2.ll
index 6c0179e36f82..d3bc7399789d 100644
--- a/llvm/test/CodeGen/X86/fma-fneg-combine-2.ll
+++ b/llvm/test/CodeGen/X86/fma-fneg-combine-2.ll
@@ -126,5 +126,88 @@ define float @negated_constant(float %x) {
   ret float %nfma
 }
 
+define <4 x double> @negated_constant_v4f64(<4 x double> %a) {
+; FMA3-LABEL: negated_constant_v4f64:
+; FMA3:       # %bb.0:
+; FMA3-NEXT:    vmovapd {{.*#+}} ymm1 = [5.0E-1,2.5E-1,1.25E-1,6.25E-2]
+; FMA3-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
+; FMA3-NEXT:    retq
+;
+; FMA4-LABEL: negated_constant_v4f64:
+; FMA4:       # %bb.0:
+; FMA4-NEXT:    vmovapd {{.*#+}} ymm1 = [5.0E-1,2.5E-1,1.25E-1,6.25E-2]
+; FMA4-NEXT:    vfmaddpd {{.*#+}} ymm0 = (ymm0 * ymm1) + mem
+; FMA4-NEXT:    retq
+  %t = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double 5.000000e-01, double 2.5000000e-01, double 1.25000000e-01, double 0.62500000e-01>, <4 x double> <double -5.000000e-01, double -2.5000000e-01, double -1.25000000e-01, double -0.62500000e-01>)
+  ret <4 x double> %t
+}
+
+define <4 x double> @negated_constant_v4f64_2fmas(<4 x double> %a, <4 x double> %b) {
+; FMA3-LABEL: negated_constant_v4f64_2fmas:
+; FMA3:       # %bb.0:
+; FMA3-NEXT:    vmovapd {{.*#+}} ymm2 = <-5.0E-1,u,-2.5E+0,-4.5E+0>
+; FMA3-NEXT:    vfmadd213pd {{.*#+}} ymm2 = (ymm0 * ymm2) + ymm1
+; FMA3-NEXT:    vfmadd231pd {{.*#+}} ymm1 = (ymm0 * mem) + ymm1
+; FMA3-NEXT:    vaddpd %ymm1, %ymm2, %ymm0
+; FMA3-NEXT:    retq
+;
+; FMA4-LABEL: negated_constant_v4f64_2fmas:
+; FMA4:       # %bb.0:
+; FMA4-NEXT:    vfmaddpd {{.*#+}} ymm2 = (ymm0 * mem) + ymm1
+; FMA4-NEXT:    vfmaddpd {{.*#+}} ymm0 = (ymm0 * mem) + ymm1
+; FMA4-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
+; FMA4-NEXT:    retq
+  %t0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double -5.000000e-01, double undef, double -25.000000e-01, double -45.000000e-01>, <4 x double> %b)
+  %t1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double 5.000000e-01, double undef, double 25.000000e-01, double 45.000000e-01>, <4 x double> %b)
+  %t2 = fadd <4 x double> %t0, %t1
+  ret <4 x double> %t2
+}
+
+define <4 x double> @negated_constant_v4f64_fadd(<4 x double> %a) {
+; FMA3-LABEL: negated_constant_v4f64_fadd:
+; FMA3:       # %bb.0:
+; FMA3-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [1.5E+0,1.25E-1,1.5E+0,1.25E-1]
+; FMA3-NEXT:    # ymm1 = mem[0,1,0,1]
+; FMA3-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; FMA3-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
+; FMA3-NEXT:    retq
+;
+; FMA4-LABEL: negated_constant_v4f64_fadd:
+; FMA4:       # %bb.0:
+; FMA4-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [1.5E+0,1.25E-1,1.5E+0,1.25E-1]
+; FMA4-NEXT:    # ymm1 = mem[0,1,0,1]
+; FMA4-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; FMA4-NEXT:    vfmaddpd {{.*#+}} ymm0 = (ymm0 * ymm1) + mem
+; FMA4-NEXT:    retq
+  %t0 = fadd <4 x double> %a, <double 15.000000e-01, double 1.25000000e-01, double 15.000000e-01, double 1.25000000e-01>
+  %t1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %t0, <4 x double> <double 15.000000e-01, double 1.25000000e-01, double 15.000000e-01, double 1.25000000e-01>, <4 x double> <double -15.000000e-01, double -1.25000000e-01, double -15.000000e-01, double -1.25000000e-01>)
+  ret <4 x double> %t1
+}
+
+define <4 x double> @negated_constant_v4f64_2fma_undefs(<4 x double> %a, <4 x double> %b) {
+; FMA3-LABEL: negated_constant_v4f64_2fma_undefs:
+; FMA3:       # %bb.0:
+; FMA3-NEXT:    vmovapd {{.*#+}} ymm2 = [5.0E-1,5.0E-1,5.0E-1,5.0E-1]
+; FMA3-NEXT:    vfmadd213pd {{.*#+}} ymm2 = (ymm0 * ymm2) + mem
+; FMA3-NEXT:    vmovapd {{.*#+}} ymm0 = <u,5.0E-1,5.0E-1,5.0E-1>
+; FMA3-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
+; FMA3-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
+; FMA3-NEXT:    retq
+;
+; FMA4-LABEL: negated_constant_v4f64_2fma_undefs:
+; FMA4:       # %bb.0:
+; FMA4-NEXT:    vmovapd {{.*#+}} ymm2 = [5.0E-1,5.0E-1,5.0E-1,5.0E-1]
+; FMA4-NEXT:    vfmaddpd {{.*#+}} ymm0 = (ymm0 * ymm2) + mem
+; FMA4-NEXT:    vmovapd {{.*#+}} ymm2 = <u,5.0E-1,5.0E-1,5.0E-1>
+; FMA4-NEXT:    vfmaddpd {{.*#+}} ymm1 = (ymm1 * ymm2) + mem
+; FMA4-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; FMA4-NEXT:    retq
+  %t0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>, <4 x double> <double -5.000000e-01, double undef, double -5.000000e-01, double -5.000000e-01>)
+  %t1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %b, <4 x double> <double undef, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>, <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>)
+  %t2 = fadd <4 x double> %t0, %t1
+  ret <4 x double> %t2
+}
+
 declare float @llvm.fma.f32(float, float, float)
 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
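
The CHECK lines above follow the style produced by llvm/utils/update_llc_test_checks.py. The RUN lines themselves are outside the diff context; as a hedged example only (the triple and feature string are assumptions, not the file's actual RUN configuration), one of the new tests can be exercised by hand with something like:

    # Assumed invocation; the real RUN lines sit at the top of the test
    # file and are not shown in this diff.
    llc < llvm/test/CodeGen/X86/fma-fneg-combine-2.ll -mtriple=x86_64-- -mattr=+fma \
      | FileCheck llvm/test/CodeGen/X86/fma-fneg-combine-2.ll --check-prefix=FMA3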

