[llvm] 7927722 - [AArch64][SVE2] Add patterns for eor3

via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 5 02:17:38 PST 2022


Author: Tiehu Zhang
Date: 2022-12-05T18:16:55+08:00
New Revision: 7927722a74cfd49af0125edd39aeaad86fcb2333

URL: https://github.com/llvm/llvm-project/commit/7927722a74cfd49af0125edd39aeaad86fcb2333
DIFF: https://github.com/llvm/llvm-project/commit/7927722a74cfd49af0125edd39aeaad86fcb2333.diff

LOG: [AArch64][SVE2] Add patterns for eor3

Add patterns for:
    eor x, (eor y, z) -> eor3 x, y, z

Reviewed By: dmgreen, sdesmalen
Differential Revision: https://reviews.llvm.org/D138793

Added: 
    llvm/test/CodeGen/AArch64/sve2-eor3.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index d24795a78ed45..597c7a27cf33a 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -375,6 +375,9 @@ def AArch64mls_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
                               (sub node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3)),
                               // sub(a, select(mask, mul(b, c), splat(0))) -> mls(a, mask, b, c)
                               (sub node:$op1, (vselect node:$pred, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op3), (SVEDup0)))]>;
+def AArch64eor3 : PatFrags<(ops node:$op1, node:$op2, node:$op3),
+                           [(int_aarch64_sve_eor3 node:$op1, node:$op2, node:$op3),
+                            (xor node:$op1, (xor node:$op2, node:$op3))]>;
 
 class fma_patfrags<SDPatternOperator intrinsic, SDPatternOperator sdnode>
     : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
@@ -3527,7 +3530,7 @@ let Predicates = [HasSVE2orSME] in {
   defm FMLSLT_ZZZ_SHH : sve2_fp_mla_long<0b11, "fmlslt", int_aarch64_sve_fmlslt>;
 
   // SVE2 bitwise ternary operations
-  defm EOR3_ZZZZ  : sve2_int_bitwise_ternary_op<0b000, "eor3",  int_aarch64_sve_eor3>;
+  defm EOR3_ZZZZ  : sve2_int_bitwise_ternary_op<0b000, "eor3",  AArch64eor3>;
   defm BCAX_ZZZZ  : sve2_int_bitwise_ternary_op<0b010, "bcax",  int_aarch64_sve_bcax>;
   defm BSL_ZZZZ   : sve2_int_bitwise_ternary_op<0b001, "bsl",   int_aarch64_sve_bsl, AArch64bsp>;
   defm BSL1N_ZZZZ : sve2_int_bitwise_ternary_op<0b011, "bsl1n", int_aarch64_sve_bsl1n>;

diff --git a/llvm/test/CodeGen/AArch64/sve2-eor3.ll b/llvm/test/CodeGen/AArch64/sve2-eor3.ll
new file mode 100644
index 0000000000000..665008ccf7e49
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-eor3.ll
@@ -0,0 +1,151 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-eabi -mattr=+sve < %s -o - | FileCheck --check-prefix=SVE %s
+; RUN: llc -mtriple=aarch64-none-eabi -mattr=+sve2 < %s -o - | FileCheck --check-prefix=SVE2 %s
+
+define <vscale x 16 x i8> @eor3_nxv16i8_left(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
+; SVE-LABEL: eor3_nxv16i8_left:
+; SVE:       // %bb.0:
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: eor3_nxv16i8_left:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    eor3 z0.d, z0.d, z1.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 16 x i8> %0, %1
+  %5 = xor <vscale x 16 x i8> %4, %2
+  ret <vscale x 16 x i8> %5
+}
+
+define <vscale x 16 x i8> @eor3_nxv16i8_right(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
+; SVE-LABEL: eor3_nxv16i8_right:
+; SVE:       // %bb.0:
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z2.d, z0.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: eor3_nxv16i8_right:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    eor3 z2.d, z2.d, z0.d, z1.d
+; SVE2-NEXT:    mov z0.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 16 x i8> %0, %1
+  %5 = xor <vscale x 16 x i8> %2, %4
+  ret <vscale x 16 x i8> %5
+}
+
+define <vscale x 8 x i16> @eor3_nxv8i16_left(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
+; SVE-LABEL: eor3_nxv8i16_left:
+; SVE:       // %bb.0:
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: eor3_nxv8i16_left:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    eor3 z0.d, z0.d, z1.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 8 x i16> %0, %1
+  %5 = xor <vscale x 8 x i16> %4, %2
+  ret <vscale x 8 x i16> %5
+}
+
+define <vscale x 8 x i16> @eor3_nxv8i16_right(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
+; SVE-LABEL: eor3_nxv8i16_right:
+; SVE:       // %bb.0:
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z2.d, z0.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: eor3_nxv8i16_right:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    eor3 z2.d, z2.d, z0.d, z1.d
+; SVE2-NEXT:    mov z0.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 8 x i16> %0, %1
+  %5 = xor <vscale x 8 x i16> %2, %4
+  ret <vscale x 8 x i16> %5
+}
+
+define <vscale x 4 x i32> @eor3_nxv4i32_left(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
+; SVE-LABEL: eor3_nxv4i32_left:
+; SVE:       // %bb.0:
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: eor3_nxv4i32_left:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    eor3 z0.d, z0.d, z1.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 4 x i32> %0, %1
+  %5 = xor <vscale x 4 x i32> %4, %2
+  ret <vscale x 4 x i32> %5
+}
+
+define <vscale x 4 x i32> @eor3_nxv4i32_right(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
+; SVE-LABEL: eor3_nxv4i32_right:
+; SVE:       // %bb.0:
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z2.d, z0.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: eor3_nxv4i32_right:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    eor3 z2.d, z2.d, z0.d, z1.d
+; SVE2-NEXT:    mov z0.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 4 x i32> %0, %1
+  %5 = xor <vscale x 4 x i32> %2, %4
+  ret <vscale x 4 x i32> %5
+}
+
+define <vscale x 2 x i64> @eor3_nxv2i64_left(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
+; SVE-LABEL: eor3_nxv2i64_left:
+; SVE:       // %bb.0:
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: eor3_nxv2i64_left:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    eor3 z0.d, z0.d, z1.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 2 x i64> %0, %1
+  %5 = xor <vscale x 2 x i64> %4, %2
+  ret <vscale x 2 x i64> %5
+}
+
+define <vscale x 2 x i64> @eor3_nxv2i64_right(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
+; SVE-LABEL: eor3_nxv2i64_right:
+; SVE:       // %bb.0:
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    eor z0.d, z2.d, z0.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: eor3_nxv2i64_right:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    eor3 z2.d, z2.d, z0.d, z1.d
+; SVE2-NEXT:    mov z0.d, z2.d
+; SVE2-NEXT:    ret
+  %4 = xor <vscale x 2 x i64> %0, %1
+  %5 = xor <vscale x 2 x i64> %2, %4
+  ret <vscale x 2 x i64> %5
+}
+
+define <vscale x 2 x i64> @eor3_vnot(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1) {
+; SVE-LABEL: eor3_vnot:
+; SVE:       // %bb.0:
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    ret
+;
+; SVE2-LABEL: eor3_vnot:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    eor z0.d, z0.d, z1.d
+; SVE2-NEXT:    ret
+  %3 = xor <vscale x 2 x i64> %0, zeroinitializer
+  %4 = xor <vscale x 2 x i64> %3, %1
+  ret <vscale x 2 x i64> %4
+}
+


        


More information about the llvm-commits mailing list