[llvm] 836f790 - [AArch64][SVE] Add patterns to select masked add/sub instructions
Cullen Rhodes via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 25 00:22:37 PDT 2022
Author: Cullen Rhodes
Date: 2022-07-25T07:22:05Z
New Revision: 836f790bb17296520b06f275b16b53ae243b0369
URL: https://github.com/llvm/llvm-project/commit/836f790bb17296520b06f275b16b53ae243b0369
DIFF: https://github.com/llvm/llvm-project/commit/836f790bb17296520b06f275b16b53ae243b0369.diff
LOG: [AArch64][SVE] Add patterns to select masked add/sub instructions
When lowering add(a, select(mask, b, splat(0))), the sel instruction can
be removed by using predicated add/sub instructions.
Reviewed By: paulwalker-arm
Differential Revision: https://reviews.llvm.org/D129751
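As a minimal sketch of the pattern this combine recognizes (mirroring the
nxv4i32 tests updated below; the other element types follow the same shape),
IR such as:

  define <vscale x 4 x i32> @masked_add(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask) {
    ; select yields %b in active lanes and zero in inactive lanes
    %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x i32> %b, <vscale x 4 x i32> zeroinitializer
    %ret = add <vscale x 4 x i32> %a, %sel
    ret <vscale x 4 x i32> %ret
  }

now selects the merging predicated form

  add z0.s, p0/m, z0.s, z1.s

instead of materializing a zero and using sel. This is sound because inactive
lanes of the select contribute the additive identity zero, which matches the
/m (merging) semantics of leaving inactive lanes of the destination (here %a
in z0) unchanged.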
Added:
Modified:
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
llvm/test/CodeGen/AArch64/sve-masked-int-arith.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 4032c4667bc7..043640423353 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -337,6 +337,12 @@ def AArch64bic : PatFrags<(ops node:$op1, node:$op2),
def AArch64subr : PatFrag<(ops node:$op1, node:$op2),
(sub node:$op2, node:$op1)>;
+def AArch64add_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2),
+ [(int_aarch64_sve_add node:$pred, node:$op1, node:$op2),
+ (add node:$op1, (vselect node:$pred, node:$op2, (SVEDup0)))]>;
+def AArch64sub_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2),
+ [(int_aarch64_sve_sub node:$pred, node:$op1, node:$op2),
+ (sub node:$op1, (vselect node:$pred, node:$op2, (SVEDup0)))]>;
let Predicates = [HasSVE] in {
defm RDFFR_PPz : sve_int_rdffr_pred<0b0, "rdffr", int_aarch64_sve_rdffr_z>;
@@ -359,8 +365,8 @@ let Predicates = [HasSVEorSME] in {
defm EOR_ZZZ : sve_int_bin_cons_log<0b10, "eor", xor>;
defm BIC_ZZZ : sve_int_bin_cons_log<0b11, "bic", AArch64bic>;
- defm ADD_ZPmZ : sve_int_bin_pred_arit_0<0b000, "add", "ADD_ZPZZ", int_aarch64_sve_add, DestructiveBinaryComm>;
- defm SUB_ZPmZ : sve_int_bin_pred_arit_0<0b001, "sub", "SUB_ZPZZ", int_aarch64_sve_sub, DestructiveBinaryCommWithRev, "SUBR_ZPmZ">;
+ defm ADD_ZPmZ : sve_int_bin_pred_arit_0<0b000, "add", "ADD_ZPZZ", AArch64add_m1, DestructiveBinaryComm>;
+ defm SUB_ZPmZ : sve_int_bin_pred_arit_0<0b001, "sub", "SUB_ZPZZ", AArch64sub_m1, DestructiveBinaryCommWithRev, "SUBR_ZPmZ">;
defm SUBR_ZPmZ : sve_int_bin_pred_arit_0<0b011, "subr", "SUBR_ZPZZ", int_aarch64_sve_subr, DestructiveBinaryCommWithRev, "SUB_ZPmZ", /*isReverseInstr*/ 1>;
defm ORR_ZPmZ : sve_int_bin_pred_log<0b000, "orr", "ORR_ZPZZ", int_aarch64_sve_orr, DestructiveBinaryComm>;
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-masked-int-arith.ll
index 10b0de8e3f92..e53239079fd8 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-int-arith.ll
@@ -8,9 +8,7 @@
define <vscale x 16 x i8> @masked_add_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: masked_add_nxv16i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z2.b, #0 // =0x0
-; CHECK-NEXT: sel z1.b, p0, z1.b, z2.b
-; CHECK-NEXT: add z0.b, z0.b, z1.b
+; CHECK-NEXT: add z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%select = select <vscale x 16 x i1> %mask, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer
%ret = add <vscale x 16 x i8> %a, %select
@@ -20,9 +18,7 @@ define <vscale x 16 x i8> @masked_add_nxv16i8(<vscale x 16 x i8> %a, <vscale x 1
define <vscale x 8 x i16> @masked_add_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_add_nxv8i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z2.h, #0 // =0x0
-; CHECK-NEXT: sel z1.h, p0, z1.h, z2.h
-; CHECK-NEXT: add z0.h, z0.h, z1.h
+; CHECK-NEXT: add z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
%select = select <vscale x 8 x i1> %mask, <vscale x 8 x i16> %b, <vscale x 8 x i16> zeroinitializer
%ret = add <vscale x 8 x i16> %a, %select
@@ -32,9 +28,7 @@ define <vscale x 8 x i16> @masked_add_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8
define <vscale x 4 x i32> @masked_add_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_add_nxv4i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z2.s, #0 // =0x0
-; CHECK-NEXT: sel z1.s, p0, z1.s, z2.s
-; CHECK-NEXT: add z0.s, z0.s, z1.s
+; CHECK-NEXT: add z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
%select = select <vscale x 4 x i1> %mask, <vscale x 4 x i32> %b, <vscale x 4 x i32> zeroinitializer
%ret = add <vscale x 4 x i32> %a, %select
@@ -44,9 +38,7 @@ define <vscale x 4 x i32> @masked_add_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4
define <vscale x 2 x i64> @masked_add_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_add_nxv2i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z2.d, #0 // =0x0
-; CHECK-NEXT: sel z1.d, p0, z1.d, z2.d
-; CHECK-NEXT: add z0.d, z0.d, z1.d
+; CHECK-NEXT: add z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
%select = select <vscale x 2 x i1> %mask, <vscale x 2 x i64> %b, <vscale x 2 x i64> zeroinitializer
%ret = add <vscale x 2 x i64> %a, %select
@@ -60,9 +52,7 @@ define <vscale x 2 x i64> @masked_add_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2
define <vscale x 16 x i8> @masked_sub_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: masked_sub_nxv16i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z2.b, #0 // =0x0
-; CHECK-NEXT: sel z1.b, p0, z1.b, z2.b
-; CHECK-NEXT: sub z0.b, z0.b, z1.b
+; CHECK-NEXT: sub z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%select = select <vscale x 16 x i1> %mask, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer
%ret = sub <vscale x 16 x i8> %a, %select
@@ -72,9 +62,7 @@ define <vscale x 16 x i8> @masked_sub_nxv16i8(<vscale x 16 x i8> %a, <vscale x 1
define <vscale x 8 x i16> @masked_sub_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_sub_nxv8i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z2.h, #0 // =0x0
-; CHECK-NEXT: sel z1.h, p0, z1.h, z2.h
-; CHECK-NEXT: sub z0.h, z0.h, z1.h
+; CHECK-NEXT: sub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
%select = select <vscale x 8 x i1> %mask, <vscale x 8 x i16> %b, <vscale x 8 x i16> zeroinitializer
%ret = sub <vscale x 8 x i16> %a, %select
@@ -84,9 +72,7 @@ define <vscale x 8 x i16> @masked_sub_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8
define <vscale x 4 x i32> @masked_sub_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_sub_nxv4i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z2.s, #0 // =0x0
-; CHECK-NEXT: sel z1.s, p0, z1.s, z2.s
-; CHECK-NEXT: sub z0.s, z0.s, z1.s
+; CHECK-NEXT: sub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
%select = select <vscale x 4 x i1> %mask, <vscale x 4 x i32> %b, <vscale x 4 x i32> zeroinitializer
%ret = sub <vscale x 4 x i32> %a, %select
@@ -96,9 +82,7 @@ define <vscale x 4 x i32> @masked_sub_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4
define <vscale x 2 x i64> @masked_sub_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sub_nxv2i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z2.d, #0 // =0x0
-; CHECK-NEXT: sel z1.d, p0, z1.d, z2.d
-; CHECK-NEXT: sub z0.d, z0.d, z1.d
+; CHECK-NEXT: sub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
%select = select <vscale x 2 x i1> %mask, <vscale x 2 x i64> %b, <vscale x 2 x i64> zeroinitializer
%ret = sub <vscale x 2 x i64> %a, %select