[llvm] r336514 - [X86] Enhance combineFMA to look for FNEG behind an EXTRACT_VECTOR_ELT.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Jul 8 11:04:01 PDT 2018
Author: ctopper
Date: Sun Jul 8 11:04:00 2018
New Revision: 336514
URL: http://llvm.org/viewvc/llvm-project?rev=336514&view=rev
Log:
[X86] Enhance combineFMA to look for FNEG behind an EXTRACT_VECTOR_ELT.
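Previously, the invertIfNegative helper in combineFMA only recognized an FNEG that appeared directly as an FMA operand. The scalar AVX-512 intrinsics lower to an FMA whose operands are EXTRACT_VECTOR_ELTs of lane 0, so a negation of the source vector was hidden behind the extract and the fold was missed. The lambda now also looks through an EXTRACT_VECTOR_ELT with constant index 0: if the extracted vector is an FNEG, it builds a new extract from the FNEG's input and reports the operand as negated, allowing the FMA to be rewritten to FMSUB/FNMADD/FNMSUB.

As a rough source-level illustration (not part of this commit; assumes clang with -mavx512f and mirrors the test_mm_mask_fmsub_round_ss test updated below; the wrapper name and rounding flags are only illustrative), code like the following now selects a single masked vfmsub213ss instead of a vxorps plus vfmadd213ss:

    #include <immintrin.h>

    // Masked scalar fused multiply-subtract with round-to-nearest + SAE.
    // The negated addend produced inside the intrinsic used to survive as a
    // separate vxorps; with this change it folds into one vfmsub213ss.
    __m128 fmsub_rn_ss(__m128 w, __mmask8 m, __m128 a, __m128 b) {
      return _mm_mask_fmsub_round_ss(w, m, a, b,
                                     _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }

The vfnmadd/vfnmsub and the 231-form cases in the test updates below come from the same fold applied to the other operands, and the fma4-fneg-combine.ll changes show the analogous FMA4 forms (vfmsubss, vfnmaddss, vfnmsubss).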
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
llvm/trunk/test/CodeGen/X86/fma4-fneg-combine.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=336514&r1=336513&r2=336514&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Jul 8 11:04:00 2018
@@ -37762,11 +37762,23 @@ static SDValue combineFMA(SDNode *N, Sel
SDValue B = N->getOperand(1);
SDValue C = N->getOperand(2);
- auto invertIfNegative = [](SDValue &V) {
+ auto invertIfNegative = [&DAG](SDValue &V) {
if (SDValue NegVal = isFNEG(V.getNode())) {
V = NegVal;
return true;
}
+ // Look through extract_vector_elts. If it comes from an FNEG, create a
+ // new extract from the FNEG input.
+ if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ isa<ConstantSDNode>(V.getOperand(1)) &&
+ cast<ConstantSDNode>(V.getOperand(1))->getZExtValue() == 0) {
+ if (SDValue NegVal = isFNEG(V.getOperand(0).getNode())) {
+ V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
+ NegVal, V.getOperand(1));
+ return true;
+ }
+ }
+
return false;
};
Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll?rev=336514&r1=336513&r2=336514&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll Sun Jul 8 11:04:00 2018
@@ -5048,18 +5048,14 @@ define <4 x float> @test_mm_mask_fmsub_r
; X86-LABEL: test_mm_mask_fmsub_round_ss:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT: vxorps %xmm3, %xmm2, %xmm2
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT: vfmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT: retl
;
; X64-LABEL: test_mm_mask_fmsub_round_ss:
; X64: # %bb.0: # %entry
-; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT: vxorps %xmm3, %xmm2, %xmm2
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: vfmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
entry:
%0 = extractelement <4 x float> %__W, i64 0
@@ -5104,18 +5100,14 @@ define <4 x float> @test_mm_maskz_fmsub_
; X86-LABEL: test_mm_maskz_fmsub_round_ss:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT: vxorps %xmm3, %xmm2, %xmm2
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT: vfmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT: retl
;
; X64-LABEL: test_mm_maskz_fmsub_round_ss:
; X64: # %bb.0: # %entry
-; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT: vxorps %xmm3, %xmm2, %xmm2
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: vfmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT: retq
entry:
%0 = extractelement <4 x float> %__A, i64 0
@@ -5163,21 +5155,15 @@ define <4 x float> @test_mm_mask3_fmsub_
; X86-LABEL: test_mm_mask3_fmsub_round_ss:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT: vxorps %xmm3, %xmm2, %xmm3
-; X86-NEXT: vfmadd213ss %xmm3, %xmm0, %xmm1
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; X86-NEXT: vfmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test_mm_mask3_fmsub_round_ss:
; X64: # %bb.0: # %entry
-; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT: vxorps %xmm3, %xmm2, %xmm3
-; X64-NEXT: vfmadd213ss %xmm3, %xmm0, %xmm1
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; X64-NEXT: vfmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
entry:
@@ -5224,18 +5210,14 @@ define <4 x float> @test_mm_mask_fnmadd_
; X86-LABEL: test_mm_mask_fnmadd_round_ss:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT: vxorps %xmm3, %xmm1, %xmm1
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT: vfnmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT: retl
;
; X64-LABEL: test_mm_mask_fnmadd_round_ss:
; X64: # %bb.0: # %entry
-; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT: vxorps %xmm3, %xmm1, %xmm1
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: vfnmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
entry:
%0 = extractelement <4 x float> %__W, i64 0
@@ -5280,18 +5262,14 @@ define <4 x float> @test_mm_maskz_fnmadd
; X86-LABEL: test_mm_maskz_fnmadd_round_ss:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT: vxorps %xmm3, %xmm1, %xmm1
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT: vfnmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT: retl
;
; X64-LABEL: test_mm_maskz_fnmadd_round_ss:
; X64: # %bb.0: # %entry
-; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT: vxorps %xmm3, %xmm1, %xmm1
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: vfnmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT: retq
entry:
%0 = extractelement <4 x float> %__A, i64 0
@@ -5339,19 +5317,15 @@ define <4 x float> @test_mm_mask3_fnmadd
; X86-LABEL: test_mm_mask3_fnmadd_round_ss:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT: vxorps %xmm3, %xmm1, %xmm1
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmadd231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT: vfnmadd231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test_mm_mask3_fnmadd_round_ss:
; X64: # %bb.0: # %entry
-; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT: vxorps %xmm3, %xmm1, %xmm1
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmadd231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT: vfnmadd231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
entry:
@@ -5399,20 +5373,14 @@ define <4 x float> @test_mm_mask_fnmsub_
; X86-LABEL: test_mm_mask_fnmsub_round_ss:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT: vxorps %xmm3, %xmm1, %xmm1
-; X86-NEXT: vxorps %xmm3, %xmm2, %xmm2
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT: vfnmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT: retl
;
; X64-LABEL: test_mm_mask_fnmsub_round_ss:
; X64: # %bb.0: # %entry
-; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT: vxorps %xmm3, %xmm1, %xmm1
-; X64-NEXT: vxorps %xmm3, %xmm2, %xmm2
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: vfnmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
entry:
%0 = extractelement <4 x float> %__W, i64 0
@@ -5459,20 +5427,14 @@ define <4 x float> @test_mm_maskz_fnmsub
; X86-LABEL: test_mm_maskz_fnmsub_round_ss:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT: vxorps %xmm3, %xmm1, %xmm1
-; X86-NEXT: vxorps %xmm3, %xmm2, %xmm2
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT: vfnmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT: retl
;
; X64-LABEL: test_mm_maskz_fnmsub_round_ss:
; X64: # %bb.0: # %entry
-; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT: vxorps %xmm3, %xmm1, %xmm1
-; X64-NEXT: vxorps %xmm3, %xmm2, %xmm2
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: vfnmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT: retq
entry:
%0 = extractelement <4 x float> %__A, i64 0
@@ -5522,23 +5484,15 @@ define <4 x float> @test_mm_mask3_fnmsub
; X86-LABEL: test_mm_mask3_fnmsub_round_ss:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X86-NEXT: vxorps %xmm3, %xmm1, %xmm1
-; X86-NEXT: vxorps %xmm3, %xmm2, %xmm3
-; X86-NEXT: vfmadd213ss %xmm3, %xmm0, %xmm1
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; X86-NEXT: vfnmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test_mm_mask3_fnmsub_round_ss:
; X64: # %bb.0: # %entry
-; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
-; X64-NEXT: vxorps %xmm3, %xmm1, %xmm1
-; X64-NEXT: vxorps %xmm3, %xmm2, %xmm3
-; X64-NEXT: vfmadd213ss %xmm3, %xmm0, %xmm1
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
+; X64-NEXT: vfnmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
entry:
@@ -6083,20 +6037,14 @@ define <2 x double> @test_mm_mask_fnmsub
; X86-LABEL: test_mm_mask_fnmsub_round_sd:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X86-NEXT: vxorpd %xmm3, %xmm1, %xmm1
-; X86-NEXT: vxorpd %xmm3, %xmm2, %xmm2
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT: vfnmsub213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT: retl
;
; X64-LABEL: test_mm_mask_fnmsub_round_sd:
; X64: # %bb.0: # %entry
-; X64-NEXT: vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X64-NEXT: vxorpd %xmm3, %xmm1, %xmm1
-; X64-NEXT: vxorpd %xmm3, %xmm2, %xmm2
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: vfnmsub213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: retq
entry:
%0 = extractelement <2 x double> %__W, i64 0
@@ -6143,20 +6091,14 @@ define <2 x double> @test_mm_maskz_fnmsu
; X86-LABEL: test_mm_maskz_fnmsub_round_sd:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X86-NEXT: vxorpd %xmm3, %xmm1, %xmm1
-; X86-NEXT: vxorpd %xmm3, %xmm2, %xmm2
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT: vfnmsub213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT: retl
;
; X64-LABEL: test_mm_maskz_fnmsub_round_sd:
; X64: # %bb.0: # %entry
-; X64-NEXT: vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X64-NEXT: vxorpd %xmm3, %xmm1, %xmm1
-; X64-NEXT: vxorpd %xmm3, %xmm2, %xmm2
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: vfnmsub213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT: retq
entry:
%0 = extractelement <2 x double> %__A, i64 0
@@ -6206,23 +6148,15 @@ define <2 x double> @test_mm_mask3_fnmsu
; X86-LABEL: test_mm_mask3_fnmsub_round_sd:
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X86-NEXT: vxorpd %xmm3, %xmm1, %xmm1
-; X86-NEXT: vxorpd %xmm3, %xmm2, %xmm3
-; X86-NEXT: vfmadd213sd %xmm3, %xmm0, %xmm1
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
+; X86-NEXT: vfnmsub231sd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovapd %xmm2, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test_mm_mask3_fnmsub_round_sd:
; X64: # %bb.0: # %entry
-; X64-NEXT: vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; X64-NEXT: vxorpd %xmm3, %xmm1, %xmm1
-; X64-NEXT: vxorpd %xmm3, %xmm2, %xmm3
-; X64-NEXT: vfmadd213sd %xmm3, %xmm0, %xmm1
; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
+; X64-NEXT: vfnmsub231sd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovapd %xmm2, %xmm0
; X64-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/fma4-fneg-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma4-fneg-combine.ll?rev=336514&r1=336513&r2=336514&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma4-fneg-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma4-fneg-combine.ll Sun Jul 8 11:04:00 2018
@@ -20,8 +20,7 @@ define <4 x float> @test1(<4 x float> %a
define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vxorps {{.*}}(%rip), %xmm2, %xmm2
-; CHECK-NEXT: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vfmsubss %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
%res = tail call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i)
@@ -31,8 +30,7 @@ define <4 x float> @test2(<4 x float> %a
define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vxorps {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-NEXT: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
%res = tail call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %a, <4 x float> %sub.i, <4 x float> %c)
@@ -42,8 +40,7 @@ define <4 x float> @test3(<4 x float> %a
define <4 x float> @test4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
-; CHECK-NEXT: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
%res = tail call <4 x float> @llvm.x86.fma4.vfmadd.ss(<4 x float> %sub.i, <4 x float> %b, <4 x float> %c)
@@ -53,10 +50,7 @@ define <4 x float> @test4(<4 x float> %a
define <4 x float> @test5(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: test5:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
-; CHECK-NEXT: vxorps %xmm3, %xmm0, %xmm0
-; CHECK-NEXT: vxorps %xmm3, %xmm2, %xmm2
-; CHECK-NEXT: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vfnmsubss %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
%sub.i.2 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
@@ -78,8 +72,7 @@ define <2 x double> @test6(<2 x double>
define <2 x double> @test7(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm2, %xmm2
-; CHECK-NEXT: vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vfmsubsd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
%res = tail call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %sub.i)
@@ -89,8 +82,7 @@ define <2 x double> @test7(<2 x double>
define <2 x double> @test8(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-NEXT: vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %b
%res = tail call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a, <2 x double> %sub.i, <2 x double> %c)
@@ -100,8 +92,7 @@ define <2 x double> @test8(<2 x double>
define <2 x double> @test9(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test9:
; CHECK: # %bb.0:
-; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
-; CHECK-NEXT: vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
%res = tail call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %sub.i, <2 x double> %b, <2 x double> %c)
@@ -111,10 +102,7 @@ define <2 x double> @test9(<2 x double>
define <2 x double> @test10(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; CHECK-LABEL: test10:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
-; CHECK-NEXT: vxorpd %xmm3, %xmm0, %xmm0
-; CHECK-NEXT: vxorpd %xmm3, %xmm2, %xmm2
-; CHECK-NEXT: vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vfnmsubsd %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
%sub.i.2 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c