[llvm] 9f4caf5 - [AArch64] add tests for bitwise logic reassociation; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 13 08:31:15 PDT 2022


Author: Sanjay Patel
Date: 2022-03-13T11:12:30-04:00
New Revision: 9f4caf55dba417d4d67526d7bc8f23a12090bca9

URL: https://github.com/llvm/llvm-project/commit/9f4caf55dba417d4d67526d7bc8f23a12090bca9
DIFF: https://github.com/llvm/llvm-project/commit/9f4caf55dba417d4d67526d7bc8f23a12090bca9.diff

LOG: [AArch64] add tests for bitwise logic reassociation; NFC

Chooses from a variety of scalar/vector/illegal types
because that should not inhibit any folds.

Added: 
    llvm/test/CodeGen/AArch64/logic-reassociate.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/logic-reassociate.ll b/llvm/test/CodeGen/AArch64/logic-reassociate.ll
new file mode 100644
index 0000000000000..900a574f18fe5
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/logic-reassociate.ll
@@ -0,0 +1,146 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-- -o - %s | FileCheck %s
+
+define i32 @and_commute0(i32 %x, i32 %y) {
+; CHECK-LABEL: and_commute0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w0, w1
+; CHECK-NEXT:    and w0, w0, w8
+; CHECK-NEXT:    ret
+  %b = and i32 %x, %y
+  %b2 = and i32 %x, %b
+  ret i32 %b2
+}
+
+define i128 @and_commute1(i128 %x, i128 %y) {
+; CHECK-LABEL: and_commute1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and x8, x3, x1
+; CHECK-NEXT:    and x9, x2, x0
+; CHECK-NEXT:    and x0, x0, x9
+; CHECK-NEXT:    and x1, x1, x8
+; CHECK-NEXT:    ret
+  %b = and i128 %y, %x
+  %b2 = and i128 %x, %b
+  ret i128 %b2
+}
+
+define <4 x i32> @and_commute2(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: and_commute2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and v1.16b, v0.16b, v1.16b
+; CHECK-NEXT:    and v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    ret
+  %b = and <4 x i32> %x, %y
+  %b2 = and <4 x i32> %b, %x
+  ret <4 x i32> %b2
+}
+
+define <8 x i16> @and_commute3(<8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: and_commute3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and v1.16b, v1.16b, v0.16b
+; CHECK-NEXT:    and v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    ret
+  %b = and <8 x i16> %y, %x
+  %b2 = and <8 x i16> %b, %x
+  ret <8 x i16> %b2
+}
+
+define i16 @or_commute0(i16 %x, i16 %y) {
+; CHECK-LABEL: or_commute0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, w0, w1
+; CHECK-NEXT:    orr w0, w0, w8
+; CHECK-NEXT:    ret
+  %b = or i16 %x, %y
+  %b2 = or i16 %x, %b
+  ret i16 %b2
+}
+
+define i8 @or_commute1(i8 %x, i8 %y) {
+; CHECK-LABEL: or_commute1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, w1, w0
+; CHECK-NEXT:    orr w0, w0, w8
+; CHECK-NEXT:    ret
+  %b = or i8 %y, %x
+  %b2 = or i8 %x, %b
+  ret i8 %b2
+}
+
+define <2 x i64> @or_commute2(<2 x i64> %x, <2 x i64> %y) {
+; CHECK-LABEL: or_commute2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr v1.16b, v0.16b, v1.16b
+; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    ret
+  %b = or <2 x i64> %x, %y
+  %b2 = or <2 x i64> %b, %x
+  ret <2 x i64> %b2
+}
+
+define <8 x i64> @or_commute3(<8 x i64> %x, <8 x i64> %y) {
+; CHECK-LABEL: or_commute3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr v7.16b, v7.16b, v3.16b
+; CHECK-NEXT:    orr v6.16b, v6.16b, v2.16b
+; CHECK-NEXT:    orr v5.16b, v5.16b, v1.16b
+; CHECK-NEXT:    orr v4.16b, v4.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v4.16b, v0.16b
+; CHECK-NEXT:    orr v1.16b, v5.16b, v1.16b
+; CHECK-NEXT:    orr v2.16b, v6.16b, v2.16b
+; CHECK-NEXT:    orr v3.16b, v7.16b, v3.16b
+; CHECK-NEXT:    ret
+  %b = or <8 x i64> %y, %x
+  %b2 = or <8 x i64> %b, %x
+  ret <8 x i64> %b2
+}
+
+define <16 x i8> @xor_commute0(<16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: xor_commute0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor v1.16b, v0.16b, v1.16b
+; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ret
+  %b = xor <16 x i8> %x, %y
+  %b2 = xor <16 x i8> %x, %b
+  ret <16 x i8> %b2
+}
+
+define <8 x i32> @xor_commute1(<8 x i32> %x, <8 x i32> %y) {
+; CHECK-LABEL: xor_commute1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor v2.16b, v2.16b, v0.16b
+; CHECK-NEXT:    eor v3.16b, v3.16b, v1.16b
+; CHECK-NEXT:    eor v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    eor v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    ret
+  %b = xor <8 x i32> %y, %x
+  %b2 = xor <8 x i32> %x, %b
+  ret <8 x i32> %b2
+}
+
+define i64 @xor_commute2(i64 %x, i64 %y) {
+; CHECK-LABEL: xor_commute2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor x8, x0, x1
+; CHECK-NEXT:    eor x0, x8, x0
+; CHECK-NEXT:    ret
+  %b = xor i64 %x, %y
+  %b2 = xor i64 %b, %x
+  ret i64 %b2
+}
+
+define i78 @xor_commute3(i78 %x, i78 %y) {
+; CHECK-LABEL: xor_commute3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor x8, x3, x1
+; CHECK-NEXT:    eor x9, x2, x0
+; CHECK-NEXT:    eor x0, x9, x0
+; CHECK-NEXT:    eor x1, x8, x1
+; CHECK-NEXT:    ret
+  %b = xor i78 %y, %x
+  %b2 = xor i78 %b, %x
+  ret i78 %b2
+}


        


More information about the llvm-commits mailing list