[llvm] 49178a2 - [SVE] Extend isel pattern coverage for BIC.

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 28 05:20:54 PST 2022


Author: Paul Walker
Date: 2022-01-28T13:14:46Z
New Revision: 49178a2c4ee3f44b85d99c3730a7e71e2caf554a

URL: https://github.com/llvm/llvm-project/commit/49178a2c4ee3f44b85d99c3730a7e71e2caf554a
DIFF: https://github.com/llvm/llvm-project/commit/49178a2c4ee3f44b85d99c3730a7e71e2caf554a.diff

LOG: [SVE] Extend isel pattern coverage for BIC.

Adds patterns of the form "(and a, (not b)) -> bic".

NOTE: With this support I'm inclined to remove AArch64ISD::BIC,
but will leave that investigation for another time.

Differential Revision: https://reviews.llvm.org/D118365

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td
    llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
    llvm/test/CodeGen/AArch64/sve-int-log.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 73a680465f6f6..ea0a88e5c5913 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -292,7 +292,13 @@ def SDT_AArch64Arith_Unpred : SDTypeProfile<1, 2, [
   SDTCisSameAs<0,1>, SDTCisSameAs<1,2>
 ]>;
 
-def AArch64bic : SDNode<"AArch64ISD::BIC",  SDT_AArch64Arith_Unpred>;
+def AArch64bic_node : SDNode<"AArch64ISD::BIC",  SDT_AArch64Arith_Unpred>;
+
+def AArch64bic : PatFrags<(ops node:$op1, node:$op2),
+                          [(and node:$op1, (xor node:$op2, (AArch64dup (i32 -1)))),
+                           (and node:$op1, (xor node:$op2, (AArch64dup (i64 -1)))),
+                           (and node:$op1, (xor node:$op2, (SVEAllActive))),
+                           (AArch64bic_node node:$op1, node:$op2)]>;
 
 let Predicates = [HasSVE] in {
   defm RDFFR_PPz  : sve_int_rdffr_pred<0b0, "rdffr", int_aarch64_sve_rdffr_z>;
@@ -734,8 +740,8 @@ let Predicates = [HasSVEorStreamingSVE] in {
   defm PFIRST  : sve_int_pfirst<0b00000, "pfirst", int_aarch64_sve_pfirst>;
   defm PNEXT   : sve_int_pnext<0b00110, "pnext", int_aarch64_sve_pnext>;
 
-  defm AND_PPzPP   : sve_int_pred_log_and<0b0000, "and", int_aarch64_sve_and_z>;
-  defm BIC_PPzPP   : sve_int_pred_log<0b0001, "bic", int_aarch64_sve_bic_z>;
+  defm AND_PPzPP   : sve_int_pred_log_and<0b0000, "and", int_aarch64_sve_and_z, and>;
+  defm BIC_PPzPP   : sve_int_pred_log_and<0b0001, "bic", int_aarch64_sve_bic_z, AArch64bic>;
   defm EOR_PPzPP   : sve_int_pred_log<0b0010, "eor", int_aarch64_sve_eor_z, xor>;
   defm SEL_PPPP    : sve_int_pred_log<0b0011, "sel", vselect>;
   defm ANDS_PPzPP  : sve_int_pred_log<0b0100, "ands", null_frag>;

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 574b22124957b..91eaec970e1b1 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -1633,15 +1633,16 @@ multiclass sve_int_pred_log<bits<4> opc, string asm, SDPatternOperator op,
                                !cast<Instruction>(NAME), PTRUE_D>;
 }
 
-multiclass sve_int_pred_log_and<bits<4> opc, string asm, SDPatternOperator op> :
+multiclass sve_int_pred_log_and<bits<4> opc, string asm, SDPatternOperator op,
+                                SDPatternOperator op_nopred> :
   sve_int_pred_log<opc, asm, op> {
-  def : Pat<(nxv16i1 (and nxv16i1:$Op1, nxv16i1:$Op2)),
+  def : Pat<(nxv16i1 (op_nopred nxv16i1:$Op1, nxv16i1:$Op2)),
             (!cast<Instruction>(NAME) $Op1, $Op1, $Op2)>;
-  def : Pat<(nxv8i1 (and nxv8i1:$Op1, nxv8i1:$Op2)),
+  def : Pat<(nxv8i1 (op_nopred nxv8i1:$Op1, nxv8i1:$Op2)),
             (!cast<Instruction>(NAME) $Op1, $Op1, $Op2)>;
-  def : Pat<(nxv4i1 (and nxv4i1:$Op1, nxv4i1:$Op2)),
+  def : Pat<(nxv4i1 (op_nopred nxv4i1:$Op1, nxv4i1:$Op2)),
             (!cast<Instruction>(NAME) $Op1, $Op1, $Op2)>;
-  def : Pat<(nxv2i1 (and nxv2i1:$Op1, nxv2i1:$Op2)),
+  def : Pat<(nxv2i1 (op_nopred nxv2i1:$Op1, nxv2i1:$Op2)),
             (!cast<Instruction>(NAME) $Op1, $Op1, $Op2)>;
 }
 

diff --git a/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll b/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
index 2faab7b04a02e..dcf2732aa4364 100644
--- a/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
@@ -1083,14 +1083,13 @@ declare <vscale x 2 x i64> @llvm.fshr.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x
 define <vscale x 2 x i64> @fshl_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c){
 ; CHECK-LABEL: fshl_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z4.d, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    mov z3.d, z2.d
-; CHECK-NEXT:    eor z2.d, z2.d, z4.d
+; CHECK-NEXT:    mov z3.d, #63 // =0x3f
+; CHECK-NEXT:    mov z4.d, z2.d
+; CHECK-NEXT:    bic z2.d, z3.d, z2.d
+; CHECK-NEXT:    and z4.d, z4.d, #0x3f
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    and z3.d, z3.d, #0x3f
-; CHECK-NEXT:    and z2.d, z2.d, #0x3f
 ; CHECK-NEXT:    lsr z1.d, z1.d, #1
-; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z3.d
+; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z4.d
 ; CHECK-NEXT:    lsr z1.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -1101,15 +1100,13 @@ define <vscale x 2 x i64> @fshl_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
 define <vscale x 4 x i64> @fshl_illegal_i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c){
 ; CHECK-LABEL: fshl_illegal_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z6.d, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z6.d, #63 // =0x3f
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    eor z7.d, z4.d, z6.d
-; CHECK-NEXT:    eor z6.d, z5.d, z6.d
-; CHECK-NEXT:    and z7.d, z7.d, #0x3f
+; CHECK-NEXT:    bic z7.d, z6.d, z4.d
 ; CHECK-NEXT:    lsr z2.d, z2.d, #1
+; CHECK-NEXT:    bic z6.d, z6.d, z5.d
 ; CHECK-NEXT:    and z4.d, z4.d, #0x3f
 ; CHECK-NEXT:    and z5.d, z5.d, #0x3f
-; CHECK-NEXT:    and z6.d, z6.d, #0x3f
 ; CHECK-NEXT:    lsr z3.d, z3.d, #1
 ; CHECK-NEXT:    lsr z2.d, p0/m, z2.d, z7.d
 ; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z4.d
@@ -1179,14 +1176,13 @@ define <vscale x 2 x i64> @fshl_rot_const_i64(<vscale x 2 x i64> %a){
 define <vscale x 2 x i64> @fshr_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c){
 ; CHECK-LABEL: fshr_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z4.d, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    mov z3.d, z2.d
-; CHECK-NEXT:    eor z2.d, z2.d, z4.d
+; CHECK-NEXT:    mov z3.d, #63 // =0x3f
+; CHECK-NEXT:    mov z4.d, z2.d
+; CHECK-NEXT:    bic z2.d, z3.d, z2.d
+; CHECK-NEXT:    and z4.d, z4.d, #0x3f
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    and z3.d, z3.d, #0x3f
-; CHECK-NEXT:    and z2.d, z2.d, #0x3f
 ; CHECK-NEXT:    lsl z0.d, z0.d, #1
-; CHECK-NEXT:    lsr z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT:    lsr z1.d, p0/m, z1.d, z4.d
 ; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z2.d
 ; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sve-int-log.ll b/llvm/test/CodeGen/AArch64/sve-int-log.ll
index e8bdf67cba8b4..c445c98c6106c 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-log.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-log.ll
@@ -82,6 +82,94 @@ define <vscale x 16 x i1> @and_pred_b(<vscale x 16 x i1> %a, <vscale x 16 x i1>
   ret <vscale x 16 x i1> %res
 }
 
+define <vscale x 2 x i64> @bic_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: bic_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %allones = shufflevector <vscale x 2 x i64> insertelement(<vscale x 2 x i64> undef, i64 -1, i32 0), <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %not_b = xor <vscale x 2 x i64> %b, %allones
+  %res = and <vscale x 2 x i64> %a, %not_b
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 4 x i32> @bic_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: bic_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %allones = shufflevector <vscale x 4 x i32> insertelement(<vscale x 4 x i32> undef, i32 -1, i32 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %not_b = xor <vscale x 4 x i32> %b, %allones
+  %res = and <vscale x 4 x i32> %a, %not_b
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 8 x i16> @bic_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: bic_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %allones = shufflevector <vscale x 8 x i16> insertelement(<vscale x 8 x i16> undef, i16 -1, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %not_b = xor <vscale x 8 x i16> %b, %allones
+  %res = and <vscale x 8 x i16> %a, %not_b
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i8> @bic_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: bic_b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %allones = shufflevector <vscale x 16 x i8> insertelement(<vscale x 16 x i8> undef, i8 -1, i32 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %not_b = xor <vscale x 16 x i8> %b, %allones
+  %res = and <vscale x 16 x i8> %a, %not_b
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 2 x i1> @bic_pred_d(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
+; CHECK-LABEL: bic_pred_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic p0.b, p0/z, p0.b, p1.b
+; CHECK-NEXT:    ret
+  %allones = shufflevector <vscale x 2 x i1> insertelement(<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not_b = xor <vscale x 2 x i1> %b, %allones
+  %res = and <vscale x 2 x i1> %a, %not_b
+  ret <vscale x 2 x i1> %res
+}
+
+define <vscale x 4 x i1> @bic_pred_s(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) {
+; CHECK-LABEL: bic_pred_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic p0.b, p0/z, p0.b, p1.b
+; CHECK-NEXT:    ret
+  %allones = shufflevector <vscale x 4 x i1> insertelement(<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not_b = xor <vscale x 4 x i1> %b, %allones
+  %res = and <vscale x 4 x i1> %a, %not_b
+  ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 8 x i1> @bic_pred_h(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) {
+; CHECK-LABEL: bic_pred_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic p0.b, p0/z, p0.b, p1.b
+; CHECK-NEXT:    ret
+  %allones = shufflevector <vscale x 8 x i1> insertelement(<vscale x 8 x i1> undef, i1 true, i32 0), <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not_b = xor <vscale x 8 x i1> %b, %allones
+  %res = and <vscale x 8 x i1> %a, %not_b
+  ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 16 x i1> @bic_pred_b(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: bic_pred_b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic p0.b, p0/z, p0.b, p1.b
+; CHECK-NEXT:    ret
+  %allones = shufflevector <vscale x 16 x i1> insertelement(<vscale x 16 x i1> undef, i1 true, i32 0), <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not_b = xor <vscale x 16 x i1> %b, %allones
+  %res = and <vscale x 16 x i1> %a, %not_b
+  ret <vscale x 16 x i1> %res
+}
+
 define <vscale x 2 x i64> @or_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: or_d:
 ; CHECK:       // %bb.0:


        


More information about the llvm-commits mailing list