[llvm] 9af9245 - [AArch64][SVE] Extend predicated fadd/fsub patterns to negative zero

David Green via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 12 11:44:07 PDT 2023


Author: David Green
Date: 2023-04-12T19:44:02+01:00
New Revision: 9af92455f36f08c111625dcd4951f91003bacee2

URL: https://github.com/llvm/llvm-project/commit/9af92455f36f08c111625dcd4951f91003bacee2
DIFF: https://github.com/llvm/llvm-project/commit/9af92455f36f08c111625dcd4951f91003bacee2.diff

LOG: [AArch64][SVE] Extend predicated fadd/fsub patterns to negative zero

This adds -0.0 patterns for fadd and fsub, to go with D147723. The fsub pattern
is added only for completeness, but since -0.0 is the neutral element of fadd,
the fadd case comes up from vectorized reductions.

Differential Revision: https://reviews.llvm.org/D147724
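
For context, here is a minimal sketch (not part of the commit) of the IR shape the
new fadd pattern handles, modelled on the fadd_h_sel_negzero test updated below; the
function name and the trailing fadd/ret lines are assumptions, since they fall outside
the diff context shown:

define <vscale x 8 x half> @fadd_negzero_sketch(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x i1> %mask) {
  ; Inactive lanes of %b are replaced with -0.0 (a splat built via fneg of zero).
  %nz = fneg <vscale x 8 x half> zeroinitializer
  %sel = select <vscale x 8 x i1> %mask, <vscale x 8 x half> %b, <vscale x 8 x half> %nz
  ; x + -0.0 == x for every x, including +0.0, so no nsz flag is needed and the
  ; whole expression can fold to a merging predicated add: fadd z0.h, p0/m, z0.h, z1.h
  %res = fadd <vscale x 8 x half> %a, %sel
  ret <vscale x 8 x half> %res
}

The fsub case is the mirror image: x - (-0.0) only equals x when the sign of zero is
not significant (it maps -0.0 to +0.0), which is why the new fsub pattern uses the nsz
variant AArch64fsub_p_nsz in the TableGen change below.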

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/test/CodeGen/AArch64/sve-fp-combine.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index f87bfbd0c9aa..80a5db6be3ea 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -264,12 +264,14 @@ def AArch64fmul_m1 : EitherVSelectOrPassthruPatFrags<int_aarch64_sve_fmul, AArch
 def AArch64fadd_m1 : PatFrags<(ops node:$pg, node:$op1, node:$op2), [
     (int_aarch64_sve_fadd node:$pg, node:$op1, node:$op2),
     (vselect node:$pg, (AArch64fadd_p (SVEAllActive), node:$op1, node:$op2), node:$op1),
-    (AArch64fadd_p_nsz (SVEAllActive), node:$op1, (vselect node:$pg, node:$op2, (SVEDup0)))
+    (AArch64fadd_p_nsz (SVEAllActive), node:$op1, (vselect node:$pg, node:$op2, (SVEDup0))),
+    (AArch64fadd_p (SVEAllActive), node:$op1, (vselect node:$pg, node:$op2, (SVEDupNeg0)))
 ]>;
 def AArch64fsub_m1 : PatFrags<(ops node:$pg, node:$op1, node:$op2), [
     (int_aarch64_sve_fsub node:$pg, node:$op1, node:$op2),
     (vselect node:$pg, (AArch64fsub_p (SVEAllActive), node:$op1, node:$op2), node:$op1),
-    (AArch64fsub_p (SVEAllActive), node:$op1, (vselect node:$pg, node:$op2, (SVEDup0)))
+    (AArch64fsub_p (SVEAllActive), node:$op1, (vselect node:$pg, node:$op2, (SVEDup0))),
+    (AArch64fsub_p_nsz (SVEAllActive), node:$op1, (vselect node:$pg, node:$op2, (SVEDupNeg0)))
 ]>;
 
 def AArch64shadd : PatFrags<(ops node:$pg, node:$op1, node:$op2),

diff --git a/llvm/test/CodeGen/AArch64/sve-fp-combine.ll b/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
index 58242b5676aa..ef68f8e7eeca 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
@@ -889,10 +889,7 @@ define <vscale x 2 x double> @fsub_d_sel(<vscale x 2 x double> %a, <vscale x 2 x
 define <vscale x 8 x half> @fadd_h_sel_negzero(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: fadd_h_sel_negzero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32768 // =0x8000
-; CHECK-NEXT:    mov z2.h, w8
-; CHECK-NEXT:    sel z1.h, p0, z1.h, z2.h
-; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %nz = fneg <vscale x 8 x half> zeroinitializer
   %sel = select <vscale x 8 x i1> %mask, <vscale x 8 x half> %b, <vscale x 8 x half> %nz
@@ -903,10 +900,7 @@ define <vscale x 8 x half> @fadd_h_sel_negzero(<vscale x 8 x half> %a, <vscale x
 define <vscale x 4 x float> @fadd_s_sel_negzero(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: fadd_s_sel_negzero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-2147483648 // =0x80000000
-; CHECK-NEXT:    mov z2.s, w8
-; CHECK-NEXT:    sel z1.s, p0, z1.s, z2.s
-; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %nz = fneg <vscale x 4 x float> zeroinitializer
   %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %b, <vscale x 4 x float> %nz
@@ -917,10 +911,7 @@ define <vscale x 4 x float> @fadd_s_sel_negzero(<vscale x 4 x float> %a, <vscale
 define <vscale x 2 x double> @fadd_d_sel_negzero(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: fadd_d_sel_negzero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    sel z1.d, p0, z1.d, z2.d
-; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %nz = fneg <vscale x 2 x double> zeroinitializer
   %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x double> %b, <vscale x 2 x double> %nz
@@ -931,10 +922,7 @@ define <vscale x 2 x double> @fadd_d_sel_negzero(<vscale x 2 x double> %a, <vsca
 define <vscale x 8 x half> @fsub_h_sel_negzero(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: fsub_h_sel_negzero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32768 // =0x8000
-; CHECK-NEXT:    mov z2.h, w8
-; CHECK-NEXT:    sel z1.h, p0, z1.h, z2.h
-; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
+; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %nz = fneg <vscale x 8 x half> zeroinitializer
   %sel = select <vscale x 8 x i1> %mask, <vscale x 8 x half> %b, <vscale x 8 x half> %nz
@@ -945,10 +933,7 @@ define <vscale x 8 x half> @fsub_h_sel_negzero(<vscale x 8 x half> %a, <vscale x
 define <vscale x 4 x float> @fsub_s_sel_negzero(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: fsub_s_sel_negzero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-2147483648 // =0x80000000
-; CHECK-NEXT:    mov z2.s, w8
-; CHECK-NEXT:    sel z1.s, p0, z1.s, z2.s
-; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
+; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %nz = fneg <vscale x 4 x float> zeroinitializer
   %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %b, <vscale x 4 x float> %nz
@@ -959,10 +944,7 @@ define <vscale x 4 x float> @fsub_s_sel_negzero(<vscale x 4 x float> %a, <vscale
 define <vscale x 2 x double> @fsub_d_sel_negzero(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: fsub_d_sel_negzero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    sel z1.d, p0, z1.d, z2.d
-; CHECK-NEXT:    fsub z0.d, z0.d, z1.d
+; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %nz = fneg <vscale x 2 x double> zeroinitializer
   %sel = select <vscale x 2 x i1> %mask, <vscale x 2 x double> %b, <vscale x 2 x double> %nz


        

