[llvm] 6d0c3eb - [AArch64] Add SVE int instructions to isAssociativeAndCommutative

KAWASHIMA Takahiro via llvm-commits <llvm-commits at lists.llvm.org>
Mon Jan 9 17:43:22 PST 2023


Author: KAWASHIMA Takahiro
Date: 2023-01-10T10:39:49+09:00
New Revision: 6d0c3eb49d01b94a51b6c15c7423cb15af89792e

URL: https://github.com/llvm/llvm-project/commit/6d0c3eb49d01b94a51b6c15c7423cb15af89792e
DIFF: https://github.com/llvm/llvm-project/commit/6d0c3eb49d01b94a51b6c15c7423cb15af89792e.diff

LOG: [AArch64] Add SVE int instructions to isAssociativeAndCommutative

Differential Revision: https://reviews.llvm.org/D140398
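
For context, the MachineCombiner pass queries the isAssociativeAndCommutative
hook to decide whether it may rebalance a chain of dependent instructions into
a tree, shortening the critical path. A minimal scalar sketch of the
transformation this enables (illustration only, not LLVM code; the helper
names are made up):

    #include <cstdint>

    // Before reassociation: three XORs in a serial dependency chain.
    uint64_t chained(uint64_t a, uint64_t b, uint64_t c, uint64_t d) {
      return ((a ^ b) ^ c) ^ d;  // each XOR waits on the previous result
    }

    // After reassociation: a balanced tree, only two XORs deep.
    uint64_t balanced(uint64_t a, uint64_t b, uint64_t c, uint64_t d) {
      return (a ^ b) ^ (c ^ d);  // (a ^ b) and (c ^ d) can issue in parallel
    }

With this patch, the same rebalancing applies to the SVE integer ADD, MUL,
AND, ORR, and EOR instructions listed in the diff below.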

Added: 
    llvm/test/CodeGen/AArch64/machine-combiner-eor.mir

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
    llvm/test/CodeGen/AArch64/machine-combiner.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 58844e368ae7..a10d8bc5d29f 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5029,6 +5029,18 @@ bool AArch64InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
   case AArch64::ORRv16i8:
   case AArch64::EORv8i8:
   case AArch64::EORv16i8:
+  // -- SVE instructions --
+  case AArch64::ADD_ZZZ_B:
+  case AArch64::ADD_ZZZ_H:
+  case AArch64::ADD_ZZZ_S:
+  case AArch64::ADD_ZZZ_D:
+  case AArch64::MUL_ZZZ_B:
+  case AArch64::MUL_ZZZ_H:
+  case AArch64::MUL_ZZZ_S:
+  case AArch64::MUL_ZZZ_D:
+  case AArch64::AND_ZZZ:
+  case AArch64::ORR_ZZZ:
+  case AArch64::EOR_ZZZ:
     return true;
 
   default:

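Why these opcodes qualify: reassociation is only safe for operations that are
exactly associative and commutative. Integer add, multiply (modulo 2^N), and
the bitwise operations all are, unlike their floating-point counterparts,
which need fast-math flags. A standalone sanity check of the identities the
combiner relies on (illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t a = 0x0123456789abcdefULL, b = 42, c = 7, d = 0xff;
      assert(((a ^ b) ^ c) == (a ^ (b ^ c)));              // associative
      assert((a * b) == (b * a));                          // commutative
      assert((((a + b) + c) + d) == ((a + b) + (c + d)));  // rebalance-safe
      return 0;
    }
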
diff --git a/llvm/test/CodeGen/AArch64/machine-combiner-eor.mir b/llvm/test/CodeGen/AArch64/machine-combiner-eor.mir
new file mode 100644
index 000000000000..8daf7e55fb53
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/machine-combiner-eor.mir
@@ -0,0 +1,85 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=neoverse-n2 -run-pass machine-combiner \
+# RUN:     -verify-machineinstrs  -o - %s | FileCheck %s
+
+# Most machine-combiner reassociation tests are put in machine-combiner.ll.
+# However, we cannot test reassociation of SVE EORs in machine-combiner.ll
+# because isel combines two EORs into one EOR3, so the SVE EOR-specific
+# tests are put in this file.
+
+---
+
+name:            reassociate_xors_sve_1
+registers:
+  - { id: 0, class: zpr, preferred-register: '' }
+  - { id: 1, class: zpr, preferred-register: '' }
+  - { id: 2, class: zpr, preferred-register: '' }
+  - { id: 3, class: zpr, preferred-register: '' }
+  - { id: 4, class: zpr, preferred-register: '' }
+  - { id: 5, class: zpr, preferred-register: '' }
+  - { id: 6, class: zpr, preferred-register: '' }
+body:             |
+  bb.0:
+    liveins: $z0, $z1, $z2, $z3
+
+    ; CHECK-LABEL: name: reassociate_xors_sve_1
+    ; CHECK: liveins: $z0, $z1, $z2, $z3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:zpr = COPY $z3
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:zpr = COPY $z2
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:zpr = COPY $z1
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:zpr = COPY $z0
+    ; CHECK-NEXT: [[EOR_ZZZ:%[0-9]+]]:zpr = EOR_ZZZ [[COPY3]], [[COPY2]]
+    ; CHECK-NEXT: [[EOR_ZZZ1:%[0-9]+]]:zpr = EOR_ZZZ [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[EOR_ZZZ2:%[0-9]+]]:zpr = EOR_ZZZ killed [[EOR_ZZZ]], killed [[EOR_ZZZ1]]
+    ; CHECK-NEXT: $z0 = COPY [[EOR_ZZZ2]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $z0
+    %3:zpr = COPY $z3
+    %2:zpr = COPY $z2
+    %1:zpr = COPY $z1
+    %0:zpr = COPY $z0
+    %4:zpr = EOR_ZZZ %0, %1
+    %5:zpr = EOR_ZZZ killed %4, %2
+    %6:zpr = EOR_ZZZ killed %5, %3
+    $z0 = COPY %6
+    RET_ReallyLR implicit $z0
+
+...
+---
+
+name:            reassociate_xors_sve_2
+registers:
+  - { id: 0, class: zpr, preferred-register: '' }
+  - { id: 1, class: zpr, preferred-register: '' }
+  - { id: 2, class: zpr, preferred-register: '' }
+  - { id: 3, class: zpr, preferred-register: '' }
+  - { id: 4, class: zpr, preferred-register: '' }
+  - { id: 5, class: zpr, preferred-register: '' }
+  - { id: 6, class: zpr, preferred-register: '' }
+body:             |
+  bb.0:
+    liveins: $z0, $z1, $z2, $z3
+
+    ; CHECK-LABEL: name: reassociate_xors_sve_2
+    ; CHECK: liveins: $z0, $z1, $z2, $z3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:zpr = COPY $z3
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:zpr = COPY $z2
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:zpr = COPY $z1
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:zpr = COPY $z0
+    ; CHECK-NEXT: [[AND_ZZZ:%[0-9]+]]:zpr = AND_ZZZ [[COPY3]], [[COPY2]]
+    ; CHECK-NEXT: [[EOR_ZZZ:%[0-9]+]]:zpr = EOR_ZZZ [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[EOR_ZZZ1:%[0-9]+]]:zpr = EOR_ZZZ killed [[AND_ZZZ]], killed [[EOR_ZZZ]]
+    ; CHECK-NEXT: $z0 = COPY [[EOR_ZZZ1]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $z0
+    %3:zpr = COPY $z3
+    %2:zpr = COPY $z2
+    %1:zpr = COPY $z1
+    %0:zpr = COPY $z0
+    %4:zpr = AND_ZZZ %0, %1
+    %5:zpr = EOR_ZZZ killed %4, %2
+    %6:zpr = EOR_ZZZ killed %5, %3
+    $z0 = COPY %6
+    RET_ReallyLR implicit $z0
+
+...

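A note on the MIR test above: on SVE2-capable cores such as the Neoverse N2
selected by the RUN line, instruction selection fuses two dependent EORs into
a single three-way EOR3 (as the test comment says), so an IR-level test would
never reach the machine combiner with a plain EOR_ZZZ chain. Conceptually
(scalar sketch, illustration only; the helper name is made up):

    #include <cstdint>

    // XOR is associative, so a three-input XOR can become one fused
    // operation; SVE2 isel selects a single EOR3 for this shape, which
    // is why the EOR reassociation test starts from MIR instead of IR.
    uint64_t eor3(uint64_t a, uint64_t b, uint64_t c) {
      return a ^ b ^ c;
    }
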
diff --git a/llvm/test/CodeGen/AArch64/machine-combiner.ll b/llvm/test/CodeGen/AArch64/machine-combiner.ll
index db65bc334696..ea7d102cce08 100644
--- a/llvm/test/CodeGen/AArch64/machine-combiner.ll
+++ b/llvm/test/CodeGen/AArch64/machine-combiner.ll
@@ -663,6 +663,88 @@ define <vscale x 2 x double> @reassociate_muls_nxv2f64(<vscale x 2 x double> %x0
   ret <vscale x 2 x double> %t2
 }
 
+; Verify that scalable vector integer arithmetic operations are reassociated.
+
+define <vscale x 16 x i8> @reassociate_muls_nxv16i8(<vscale x 16 x i8> %x0, <vscale x 16 x i8> %x1, <vscale x 16 x i8> %x2, <vscale x 16 x i8> %x3) {
+; CHECK-LABEL: reassociate_muls_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mul z0.b, z0.b, z1.b
+; CHECK-NEXT:    mul z1.b, z3.b, z2.b
+; CHECK-NEXT:    mul z0.b, z1.b, z0.b
+; CHECK-NEXT:    ret
+  %t0 = mul <vscale x 16 x i8> %x0, %x1
+  %t1 = mul <vscale x 16 x i8> %x2, %t0
+  %t2 = mul <vscale x 16 x i8> %x3, %t1
+  ret <vscale x 16 x i8> %t2
+}
+
+define <vscale x 8 x i16> @reassociate_adds_nxv8i16(<vscale x 8 x i16> %x0, <vscale x 8 x i16> %x1, <vscale x 8 x i16> %x2, <vscale x 8 x i16> %x3) {
+; CHECK-LABEL: reassociate_adds_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add z0.h, z0.h, z1.h
+; CHECK-NEXT:    add z1.h, z3.h, z2.h
+; CHECK-NEXT:    add z0.h, z1.h, z0.h
+; CHECK-NEXT:    ret
+  %t0 = add <vscale x 8 x i16> %x0, %x1
+  %t1 = add <vscale x 8 x i16> %x2, %t0
+  %t2 = add <vscale x 8 x i16> %x3, %t1
+  ret <vscale x 8 x i16> %t2
+}
+
+define <vscale x 4 x i32> @reassociate_muls_nxv4i32(<vscale x 4 x i32> %x0, <vscale x 4 x i32> %x1, <vscale x 4 x i32> %x2, <vscale x 4 x i32> %x3) {
+; CHECK-LABEL: reassociate_muls_nxv4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mul z0.s, z0.s, z1.s
+; CHECK-NEXT:    mul z1.s, z3.s, z2.s
+; CHECK-NEXT:    mul z0.s, z1.s, z0.s
+; CHECK-NEXT:    ret
+  %t0 = mul <vscale x 4 x i32> %x0, %x1
+  %t1 = mul <vscale x 4 x i32> %x2, %t0
+  %t2 = mul <vscale x 4 x i32> %x3, %t1
+  ret <vscale x 4 x i32> %t2
+}
+
+define <vscale x 2 x i64> @reassociate_adds_nxv2i64(<vscale x 2 x i64> %x0, <vscale x 2 x i64> %x1, <vscale x 2 x i64> %x2, <vscale x 2 x i64> %x3) {
+; CHECK-LABEL: reassociate_adds_nxv2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z1.d, z3.d, z2.d
+; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    ret
+  %t0 = add <vscale x 2 x i64> %x0, %x1
+  %t1 = add <vscale x 2 x i64> %x2, %t0
+  %t2 = add <vscale x 2 x i64> %x3, %t1
+  ret <vscale x 2 x i64> %t2
+}
+
+; Verify that scalable vector bitwise operations are reassociated.
+
+define <vscale x 16 x i8> @reassociate_ands_nxv16i8(<vscale x 16 x i8> %x0, <vscale x 16 x i8> %x1, <vscale x 16 x i8> %x2, <vscale x 16 x i8> %x3) {
+; CHECK-LABEL: reassociate_ands_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    and z1.d, z2.d, z3.d
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %t0 = or <vscale x 16 x i8> %x0, %x1
+  %t1 = and <vscale x 16 x i8> %t0, %x2
+  %t2 = and <vscale x 16 x i8> %t1, %x3
+  ret <vscale x 16 x i8> %t2
+}
+
+define <vscale x 8 x i16> @reassociate_ors_nxv8i16(<vscale x 8 x i16> %x0, <vscale x 8 x i16> %x1, <vscale x 8 x i16> %x2, <vscale x 8 x i16> %x3) {
+; CHECK-LABEL: reassociate_ors_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    orr z1.d, z2.d, z3.d
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %t0 = xor <vscale x 8 x i16> %x0, %x1
+  %t1 = or <vscale x 8 x i16> %t0, %x2
+  %t2 = or <vscale x 8 x i16> %t1, %x3
+  ret <vscale x 8 x i16> %t2
+}
+
 ; PR25016: https://llvm.org/bugs/show_bug.cgi?id=25016
 ; Verify that reassociation is not happening needlessly or wrongly.
 

