[llvm] 1bb3488 - [ARM] Predicated VFMA patterns

David Green via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 12 10:36:20 PDT 2020


Author: David Green
Date: 2020-08-12T18:35:01+01:00
New Revision: 1bb348868501d1ae731ec859dfb43433e7e41022

URL: https://github.com/llvm/llvm-project/commit/1bb348868501d1ae731ec859dfb43433e7e41022
DIFF: https://github.com/llvm/llvm-project/commit/1bb348868501d1ae731ec859dfb43433e7e41022.diff

LOG: [ARM] Predicated VFMA patterns

Similar to the Two op + select patterns that were added recently, this
adds some patterns for select + fma to turn them into predicated
operations.

Differential Revision: https://reviews.llvm.org/D85824

Added: 
    

Modified: 
    llvm/lib/Target/ARM/ARMInstrMVE.td
    llvm/test/CodeGen/Thumb2/mve-fmas.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index 998edf3541ced..48b8d44cacfd0 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -3652,6 +3652,14 @@ multiclass MVE_VFMA_fp_multi<string iname, bit fms, MVEVectorVTInfo VTI> {
                 (Inst $add, $m1, $m2)>;
       def : Pat<(VTI.Vec (fma m1, (fneg m2), add)),
                 (Inst $add, $m1, $m2)>;
+      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
+                                  (VTI.Vec (fma (fneg m1), m2, add)),
+                                  add)),
+                (Inst $add, $m1, $m2, ARMVCCThen, $pred)>;
+      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
+                                  (VTI.Vec (fma m1, (fneg m2), add)),
+                                  add)),
+                (Inst $add, $m1, $m2, ARMVCCThen, $pred)>;
       def : Pat<(VTI.Vec (pred_int (fneg m1), m2, add, pred)),
                 (Inst $add, $m1, $m2, ARMVCCThen, $pred)>;
       def : Pat<(VTI.Vec (pred_int m1, (fneg m2), add, pred)),
@@ -3659,6 +3667,10 @@ multiclass MVE_VFMA_fp_multi<string iname, bit fms, MVEVectorVTInfo VTI> {
     } else {
       def : Pat<(VTI.Vec (fma m1, m2, add)),
                 (Inst $add, $m1, $m2)>;
+      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
+                                  (VTI.Vec (fma m1, m2, add)),
+                                  add)),
+                (Inst $add, $m1, $m2, ARMVCCThen, $pred)>;
       def : Pat<(VTI.Vec (pred_int m1, m2, add, pred)),
                 (Inst $add, $m1, $m2, ARMVCCThen, $pred)>;
     }
@@ -5538,6 +5550,10 @@ multiclass MVE_VFMA_qr_multi<string iname, MVEVectorVTInfo VTI,
     if scalar_addend then {
       def : Pat<(VTI.Vec (fma v1, v2, vs)),
                 (VTI.Vec (Inst v1, v2, is))>;
+      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
+                                  (VTI.Vec (fma v1, v2, vs)),
+                                  v1)),
+                (VTI.Vec (Inst v1, v2, is, ARMVCCThen, $pred))>;
       def : Pat<(VTI.Vec (pred_int v1, v2, vs, pred)),
                 (VTI.Vec (Inst v1, v2, is, ARMVCCThen, pred))>;
     } else {
@@ -5545,6 +5561,14 @@ multiclass MVE_VFMA_qr_multi<string iname, MVEVectorVTInfo VTI,
                 (VTI.Vec (Inst v2, v1, is))>;
       def : Pat<(VTI.Vec (fma vs, v1, v2)),
                 (VTI.Vec (Inst v2, v1, is))>;
+      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
+                                  (VTI.Vec (fma vs, v2, v1)),
+                                  v1)),
+                (VTI.Vec (Inst v1, v2, is, ARMVCCThen, $pred))>;
+      def : Pat<(VTI.Vec (vselect (VTI.Pred VCCR:$pred),
+                                  (VTI.Vec (fma v2, vs, v1)),
+                                  v1)),
+                (VTI.Vec (Inst v1, v2, is, ARMVCCThen, $pred))>;
       def : Pat<(VTI.Vec (pred_int v1, vs, v2, pred)),
                 (VTI.Vec (Inst v2, v1, is, ARMVCCThen, pred))>;
       def : Pat<(VTI.Vec (pred_int vs, v1, v2, pred)),

diff --git a/llvm/test/CodeGen/Thumb2/mve-fmas.ll b/llvm/test/CodeGen/Thumb2/mve-fmas.ll
index cd91859b6dcbd..325239377da2d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-fmas.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-fmas.ll
@@ -481,10 +481,8 @@ define arm_aapcs_vfpcc <8 x half> @vfma16_v1_pred(<8 x half> %src1, <8 x half> %
 ;
 ; CHECK-MVE-VMLA-LABEL: vfma16_v1_pred:
 ; CHECK-MVE-VMLA:       @ %bb.0: @ %entry
-; CHECK-MVE-VMLA-NEXT:    vmov q3, q0
-; CHECK-MVE-VMLA-NEXT:    vcmp.f16 lt, q1, zr
-; CHECK-MVE-VMLA-NEXT:    vfma.f16 q3, q1, q2
-; CHECK-MVE-VMLA-NEXT:    vpsel q0, q3, q0
+; CHECK-MVE-VMLA-NEXT:    vpt.f16 lt, q1, zr
+; CHECK-MVE-VMLA-NEXT:    vfmat.f16 q0, q1, q2
 ; CHECK-MVE-VMLA-NEXT:    bx lr
 ;
 ; CHECK-MVE-LABEL: vfma16_v1_pred:
@@ -628,10 +626,8 @@ define arm_aapcs_vfpcc <8 x half> @vfma16_v2_pred(<8 x half> %src1, <8 x half> %
 ;
 ; CHECK-MVE-VMLA-LABEL: vfma16_v2_pred:
 ; CHECK-MVE-VMLA:       @ %bb.0: @ %entry
-; CHECK-MVE-VMLA-NEXT:    vmov q3, q0
-; CHECK-MVE-VMLA-NEXT:    vcmp.f16 lt, q1, zr
-; CHECK-MVE-VMLA-NEXT:    vfma.f16 q3, q1, q2
-; CHECK-MVE-VMLA-NEXT:    vpsel q0, q3, q0
+; CHECK-MVE-VMLA-NEXT:    vpt.f16 lt, q1, zr
+; CHECK-MVE-VMLA-NEXT:    vfmat.f16 q0, q1, q2
 ; CHECK-MVE-VMLA-NEXT:    bx lr
 ;
 ; CHECK-MVE-LABEL: vfma16_v2_pred:
@@ -775,10 +771,8 @@ define arm_aapcs_vfpcc <8 x half> @vfms16_pred(<8 x half> %src1, <8 x half> %src
 ;
 ; CHECK-MVE-VMLA-LABEL: vfms16_pred:
 ; CHECK-MVE-VMLA:       @ %bb.0: @ %entry
-; CHECK-MVE-VMLA-NEXT:    vmov q3, q0
-; CHECK-MVE-VMLA-NEXT:    vcmp.f16 lt, q1, zr
-; CHECK-MVE-VMLA-NEXT:    vfms.f16 q3, q1, q2
-; CHECK-MVE-VMLA-NEXT:    vpsel q0, q3, q0
+; CHECK-MVE-VMLA-NEXT:    vpt.f16 lt, q1, zr
+; CHECK-MVE-VMLA-NEXT:    vfmst.f16 q0, q1, q2
 ; CHECK-MVE-VMLA-NEXT:    bx lr
 ;
 ; CHECK-MVE-LABEL: vfms16_pred:
@@ -926,11 +920,9 @@ define arm_aapcs_vfpcc <8 x half> @vfmar16_pred(<8 x half> %src1, <8 x half> %sr
 ; CHECK-MVE-VMLA-LABEL: vfmar16_pred:
 ; CHECK-MVE-VMLA:       @ %bb.0: @ %entry
 ; CHECK-MVE-VMLA-NEXT:    vcvtb.f16.f32 s8, s8
-; CHECK-MVE-VMLA-NEXT:    vcmp.f16 lt, q1, zr
 ; CHECK-MVE-VMLA-NEXT:    vmov.f16 r0, s8
-; CHECK-MVE-VMLA-NEXT:    vmov q2, q0
-; CHECK-MVE-VMLA-NEXT:    vfma.f16 q2, q1, r0
-; CHECK-MVE-VMLA-NEXT:    vpsel q0, q2, q0
+; CHECK-MVE-VMLA-NEXT:    vpt.f16 lt, q1, zr
+; CHECK-MVE-VMLA-NEXT:    vfmat.f16 q0, q1, r0
 ; CHECK-MVE-VMLA-NEXT:    bx lr
 ;
 ; CHECK-MVE-LABEL: vfmar16_pred:
@@ -1074,11 +1066,9 @@ define arm_aapcs_vfpcc <8 x half> @vfma16_pred(<8 x half> %src1, <8 x half> %src
 ; CHECK-MVE-VMLA-LABEL: vfma16_pred:
 ; CHECK-MVE-VMLA:       @ %bb.0: @ %entry
 ; CHECK-MVE-VMLA-NEXT:    vcvtb.f16.f32 s8, s8
-; CHECK-MVE-VMLA-NEXT:    vcmp.f16 lt, q1, zr
 ; CHECK-MVE-VMLA-NEXT:    vmov.f16 r0, s8
-; CHECK-MVE-VMLA-NEXT:    vmov q2, q0
-; CHECK-MVE-VMLA-NEXT:    vfmas.f16 q2, q1, r0
-; CHECK-MVE-VMLA-NEXT:    vpsel q0, q2, q0
+; CHECK-MVE-VMLA-NEXT:    vpt.f16 lt, q1, zr
+; CHECK-MVE-VMLA-NEXT:    vfmast.f16 q0, q1, r0
 ; CHECK-MVE-VMLA-NEXT:    bx lr
 ;
 ; CHECK-MVE-LABEL: vfma16_pred:
@@ -1218,10 +1208,8 @@ define arm_aapcs_vfpcc <4 x float> @vfma32_v1_pred(<4 x float> %src1, <4 x float
 ;
 ; CHECK-MVE-VMLA-LABEL: vfma32_v1_pred:
 ; CHECK-MVE-VMLA:       @ %bb.0: @ %entry
-; CHECK-MVE-VMLA-NEXT:    vmov q3, q0
-; CHECK-MVE-VMLA-NEXT:    vcmp.f32 lt, q1, zr
-; CHECK-MVE-VMLA-NEXT:    vfma.f32 q3, q1, q2
-; CHECK-MVE-VMLA-NEXT:    vpsel q0, q3, q0
+; CHECK-MVE-VMLA-NEXT:    vpt.f32 lt, q1, zr
+; CHECK-MVE-VMLA-NEXT:    vfmat.f32 q0, q1, q2
 ; CHECK-MVE-VMLA-NEXT:    bx lr
 ;
 ; CHECK-MVE-LABEL: vfma32_v1_pred:
@@ -1290,10 +1278,8 @@ define arm_aapcs_vfpcc <4 x float> @vfma32_v2_pred(<4 x float> %src1, <4 x float
 ;
 ; CHECK-MVE-VMLA-LABEL: vfma32_v2_pred:
 ; CHECK-MVE-VMLA:       @ %bb.0: @ %entry
-; CHECK-MVE-VMLA-NEXT:    vmov q3, q0
-; CHECK-MVE-VMLA-NEXT:    vcmp.f32 lt, q1, zr
-; CHECK-MVE-VMLA-NEXT:    vfma.f32 q3, q1, q2
-; CHECK-MVE-VMLA-NEXT:    vpsel q0, q3, q0
+; CHECK-MVE-VMLA-NEXT:    vpt.f32 lt, q1, zr
+; CHECK-MVE-VMLA-NEXT:    vfmat.f32 q0, q1, q2
 ; CHECK-MVE-VMLA-NEXT:    bx lr
 ;
 ; CHECK-MVE-LABEL: vfma32_v2_pred:
@@ -1362,10 +1348,8 @@ define arm_aapcs_vfpcc <4 x float> @vfms32_pred(<4 x float> %src1, <4 x float> %
 ;
 ; CHECK-MVE-VMLA-LABEL: vfms32_pred:
 ; CHECK-MVE-VMLA:       @ %bb.0: @ %entry
-; CHECK-MVE-VMLA-NEXT:    vmov q3, q0
-; CHECK-MVE-VMLA-NEXT:    vcmp.f32 lt, q1, zr
-; CHECK-MVE-VMLA-NEXT:    vfms.f32 q3, q1, q2
-; CHECK-MVE-VMLA-NEXT:    vpsel q0, q3, q0
+; CHECK-MVE-VMLA-NEXT:    vpt.f32 lt, q1, zr
+; CHECK-MVE-VMLA-NEXT:    vfmst.f32 q0, q1, q2
 ; CHECK-MVE-VMLA-NEXT:    bx lr
 ;
 ; CHECK-MVE-LABEL: vfms32_pred:
@@ -1437,10 +1421,8 @@ define arm_aapcs_vfpcc <4 x float> @vfmar32_pred(<4 x float> %src1, <4 x float>
 ; CHECK-MVE-VMLA-LABEL: vfmar32_pred:
 ; CHECK-MVE-VMLA:       @ %bb.0: @ %entry
 ; CHECK-MVE-VMLA-NEXT:    vmov r0, s8
-; CHECK-MVE-VMLA-NEXT:    vmov q2, q0
-; CHECK-MVE-VMLA-NEXT:    vcmp.f32 lt, q1, zr
-; CHECK-MVE-VMLA-NEXT:    vfma.f32 q2, q1, r0
-; CHECK-MVE-VMLA-NEXT:    vpsel q0, q2, q0
+; CHECK-MVE-VMLA-NEXT:    vpt.f32 lt, q1, zr
+; CHECK-MVE-VMLA-NEXT:    vfmat.f32 q0, q1, r0
 ; CHECK-MVE-VMLA-NEXT:    bx lr
 ;
 ; CHECK-MVE-LABEL: vfmar32_pred:
@@ -1513,10 +1495,8 @@ define arm_aapcs_vfpcc <4 x float> @vfmas32_pred(<4 x float> %src1, <4 x float>
 ; CHECK-MVE-VMLA-LABEL: vfmas32_pred:
 ; CHECK-MVE-VMLA:       @ %bb.0: @ %entry
 ; CHECK-MVE-VMLA-NEXT:    vmov r0, s8
-; CHECK-MVE-VMLA-NEXT:    vmov q2, q0
-; CHECK-MVE-VMLA-NEXT:    vcmp.f32 lt, q1, zr
-; CHECK-MVE-VMLA-NEXT:    vfmas.f32 q2, q1, r0
-; CHECK-MVE-VMLA-NEXT:    vpsel q0, q2, q0
+; CHECK-MVE-VMLA-NEXT:    vpt.f32 lt, q1, zr
+; CHECK-MVE-VMLA-NEXT:    vfmast.f32 q0, q1, r0
 ; CHECK-MVE-VMLA-NEXT:    bx lr
 ;
 ; CHECK-MVE-LABEL: vfmas32_pred:


        


More information about the llvm-commits mailing list