[llvm] ac8b4f8 - [AArch64][SVE2] Add pattern for BCAX (#77159)

via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 8 15:51:36 PST 2024


Author: Usman Nadeem
Date: 2024-01-08T15:51:33-08:00
New Revision: ac8b4f874945f83eec8c8f56d9fc80093e02a7b2

URL: https://github.com/llvm/llvm-project/commit/ac8b4f874945f83eec8c8f56d9fc80093e02a7b2
DIFF: https://github.com/llvm/llvm-project/commit/ac8b4f874945f83eec8c8f56d9fc80093e02a7b2.diff

LOG: [AArch64][SVE2] Add pattern for BCAX (#77159)

Bitwise clear and exclusive OR: BCAX computes x ^ (y & ~z) in a single
instruction. Add an instruction-selection pattern for:
    xor x, (and y, not(z)) -> bcax x, y, z

Added: 
    llvm/test/CodeGen/AArch64/sve2-bcax.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 344a153890631e..ee10a7d1c706fc 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -453,6 +453,9 @@ def AArch64msb_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
 def AArch64eor3 : PatFrags<(ops node:$op1, node:$op2, node:$op3),
                            [(int_aarch64_sve_eor3 node:$op1, node:$op2, node:$op3),
                             (xor node:$op1, (xor node:$op2, node:$op3))]>;
+def AArch64bcax : PatFrags<(ops node:$op1, node:$op2, node:$op3),
+                           [(int_aarch64_sve_bcax node:$op1, node:$op2, node:$op3),
+                            (xor node:$op1, (and node:$op2, (vnot node:$op3)))]>;
 
 def AArch64fmla_m1 : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
                               [(int_aarch64_sve_fmla node:$pg, node:$za, node:$zn, node:$zm),
@@ -3714,7 +3717,7 @@ let Predicates = [HasSVE2orSME] in {
 
   // SVE2 bitwise ternary operations
   defm EOR3_ZZZZ  : sve2_int_bitwise_ternary_op<0b000, "eor3",  AArch64eor3>;
-  defm BCAX_ZZZZ  : sve2_int_bitwise_ternary_op<0b010, "bcax",  int_aarch64_sve_bcax>;
+  defm BCAX_ZZZZ  : sve2_int_bitwise_ternary_op<0b010, "bcax",  AArch64bcax>;
   defm BSL_ZZZZ   : sve2_int_bitwise_ternary_op<0b001, "bsl",   int_aarch64_sve_bsl, AArch64bsp>;
   defm BSL1N_ZZZZ : sve2_int_bitwise_ternary_op<0b011, "bsl1n", int_aarch64_sve_bsl1n>;
   defm BSL2N_ZZZZ : sve2_int_bitwise_ternary_op<0b101, "bsl2n", int_aarch64_sve_bsl2n>;
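
With the AArch64bcax PatFrags in place, both the existing bcax intrinsic and
the equivalent generic IR now select to a single BCAX instruction. A minimal
IR sketch (function names here are illustrative, not taken from the patch);
run through llc -mtriple=aarch64 -mattr=+sve2, each function below should
compile to one bcax:

declare <vscale x 2 x i64> @llvm.aarch64.sve.bcax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)

; Intrinsic form, matched through int_aarch64_sve_bcax (as before).
define <vscale x 2 x i64> @bcax_via_intrinsic(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z) {
  %r = call <vscale x 2 x i64> @llvm.aarch64.sve.bcax.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z)
  ret <vscale x 2 x i64> %r
}

; Generic form, newly matched through (xor x, (and y, (vnot z))).
define <vscale x 2 x i64> @bcax_via_pattern(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z) {
  %notz = xor <vscale x 2 x i64> %z, splat (i64 -1)
  %yandnotz = and <vscale x 2 x i64> %y, %notz
  %r = xor <vscale x 2 x i64> %x, %yandnotz
  ret <vscale x 2 x i64> %r
}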

diff  --git a/llvm/test/CodeGen/AArch64/sve2-bcax.ll b/llvm/test/CodeGen/AArch64/sve2-bcax.ll
new file mode 100644
index 00000000000000..c4a82e69a05ae0
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-bcax.ll
@@ -0,0 +1,143 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64 -mattr=+sve < %s -o - | FileCheck --check-prefix=SVE %s
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s -o - | FileCheck --check-prefix=SVE2 %s
+
+define <vscale x 2 x i64> @bcax_nxv2i64_1(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
+; SVE-LABEL: bcax_nxv2i64_1:
+; SVE:       // %bb.0:
+; SVE-NEXT:    bic z1.d, z2.d, z1.d
+; SVE-NEXT:    eor z0.d, z1.d, z0.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: bcax_nxv2i64_1:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 2 x i64> %1, splat (i64 -1)
+  %5 = and <vscale x 2 x i64> %4, %2
+  %6 = xor <vscale x 2 x i64> %5, %0
+  ret <vscale x 2 x i64> %6
+}
+
+define <vscale x 2 x i64> @bcax_nxv2i64_2(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
+; SVE-LABEL: bcax_nxv2i64_2:
+; SVE:       // %bb.0:
+; SVE-NEXT:    bic z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: bcax_nxv2i64_2:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
+; SVE2-NEXT:    mov z0.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 2 x i64> %1, splat (i64 -1)
+  %5 = and <vscale x 2 x i64> %4, %0
+  %6 = xor <vscale x 2 x i64> %5, %2
+  ret <vscale x 2 x i64> %6
+}
+
+define <vscale x 4 x i32> @bcax_nxv4i32_1(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
+; SVE-LABEL: bcax_nxv4i32_1:
+; SVE:       // %bb.0:
+; SVE-NEXT:    bic z1.d, z2.d, z1.d
+; SVE-NEXT:    eor z0.d, z1.d, z0.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: bcax_nxv4i32_1:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 4 x i32> %1, splat (i32 -1)
+  %5 = and <vscale x 4 x i32> %4, %2
+  %6 = xor <vscale x 4 x i32> %5, %0
+  ret <vscale x 4 x i32> %6
+}
+
+define <vscale x 4 x i32> @bcax_nxv4i32_2(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
+; SVE-LABEL: bcax_nxv4i32_2:
+; SVE:       // %bb.0:
+; SVE-NEXT:    bic z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: bcax_nxv4i32_2:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
+; SVE2-NEXT:    mov z0.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 4 x i32> %1, splat (i32 -1)
+  %5 = and <vscale x 4 x i32> %4, %0
+  %6 = xor <vscale x 4 x i32> %5, %2
+  ret <vscale x 4 x i32> %6
+}
+
+define <vscale x 8 x i16> @bcax_nxv8i16_1(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
+; SVE-LABEL: bcax_nxv8i16_1:
+; SVE:       // %bb.0:
+; SVE-NEXT:    bic z1.d, z2.d, z1.d
+; SVE-NEXT:    eor z0.d, z1.d, z0.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: bcax_nxv8i16_1:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 8 x i16> %1, splat (i16 -1)
+  %5 = and <vscale x 8 x i16> %4, %2
+  %6 = xor <vscale x 8 x i16> %5, %0
+  ret <vscale x 8 x i16> %6
+}
+
+define <vscale x 8 x i16> @bcax_nxv8i16_2(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
+; SVE-LABEL: bcax_nxv8i16_2:
+; SVE:       // %bb.0:
+; SVE-NEXT:    bic z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: bcax_nxv8i16_2:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
+; SVE2-NEXT:    mov z0.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 8 x i16> %1, splat (i16 -1)
+  %5 = and <vscale x 8 x i16> %4, %0
+  %6 = xor <vscale x 8 x i16> %5, %2
+  ret <vscale x 8 x i16> %6
+}
+
+define <vscale x 16 x i8> @bcax_nxv16i8_1(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
+; SVE-LABEL: bcax_nxv16i8_1:
+; SVE:       // %bb.0:
+; SVE-NEXT:    bic z1.d, z2.d, z1.d
+; SVE-NEXT:    eor z0.d, z1.d, z0.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: bcax_nxv16i8_1:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 16 x i8> %1, splat (i8 -1)
+  %5 = and <vscale x 16 x i8> %4, %2
+  %6 = xor <vscale x 16 x i8> %5, %0
+  ret <vscale x 16 x i8> %6
+}
+
+define <vscale x 16 x i8> @bcax_nxv16i8_2(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
+; SVE-LABEL: bcax_nxv16i8_2:
+; SVE:       // %bb.0:
+; SVE-NEXT:    bic z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: bcax_nxv16i8_2:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
+; SVE2-NEXT:    mov z0.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 16 x i8> %1, splat (i8 -1)
+  %5 = and <vscale x 16 x i8> %4, %0
+  %6 = xor <vscale x 16 x i8> %5, %2
+  ret <vscale x 16 x i8> %6
+}
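
A note on the *_2 variants above: BCAX is a destructive instruction (the
first source register doubles as the destination), so when the xor's
accumulator operand arrives in z2 but the return value must end up in z0,
selection computes into z2 and then copies, for example:

    %6 = (%0 & ~%1) ^ %2   ->   bcax z2.d, z2.d, z0.d, z1.d  // result lands in z2
                                mov  z0.d, z2.d              // copy to the return register

The *_1 variants need no copy because their accumulator is already in z0.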

More information about the llvm-commits mailing list