[llvm] 9c60010 - [GlobalIsel] Combine G_ADD and G_SUB (#92879)

via llvm-commits <llvm-commits at lists.llvm.org>
Thu May 23 07:28:37 PDT 2024


Author: Thorsten Schütt
Date: 2024-05-23T16:28:32+02:00
New Revision: 9c60010a61e5824c64706ff21a1222da1f6b3af9

URL: https://github.com/llvm/llvm-project/commit/9c60010a61e5824c64706ff21a1222da1f6b3af9
DIFF: https://github.com/llvm/llvm-project/commit/9c60010a61e5824c64706ff21a1222da1f6b3af9.diff

LOG: [GlobalIsel] Combine G_ADD and G_SUB (#92879)
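
The new rules reassociate G_ADD/G_SUB chains so the add collapses into a single
G_SUB (or the result register is replaced directly), and the new group is wired
into all_combines. A minimal before/after sketch of the first rule, mirroring the
ZeroMinusAPlusB MIR test added below (%a and %b are generic s32 vregs from that test):

    ; before the prelegalizer combiner: (0 - %a) + %b
    %zero:_(s32) = G_CONSTANT i32 0
    %sub:_(s32) = G_SUB %zero, %a
    %add:_(s32) = G_ADD %sub, %b

    ; after the ZeroMinusAPlusB rule fires: %b - %a
    %add:_(s32) = G_SUB %b, %a

The remaining rules cover the other operand orders and longer sub/add chains,
e.g. (A-B)+(B-C) -> A-C and A+(B-(A+C)) -> B-C, as listed in Combine.td below.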

Added: 
    llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir

Modified: 
    llvm/include/llvm/Target/GlobalISel/Combine.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 34698f195615b..8012f91922777 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1641,6 +1641,78 @@ extract_vector_element_shuffle_vector,
 insert_vector_element_extract_vector_element
 ]>;
 
+
+// fold ((0-A) + B) -> B-A
+def ZeroMinusAPlusB : GICombineRule<
+   (defs root:$root),
+   (match (G_SUB $sub, 0, $A),
+          (G_ADD $root, $sub, $B)),
+   (apply (G_SUB $root, $B, $A))>;
+
+// fold (A + (0-B)) -> A-B
+def APlusZeroMinusB : GICombineRule<
+   (defs root:$root),
+   (match (G_SUB $sub, 0, $B),
+          (G_ADD $root, $A, $sub)),
+   (apply (G_SUB $root, $A, $B))>;
+
+ // fold (A+(B-A)) -> B
+ def APlusBMinusB : GICombineRule<
+   (defs root:$root),
+   (match (G_SUB $sub, $B, $A),
+          (G_ADD $root, $A, $sub)),
+   (apply (GIReplaceReg $root, $B))>;
+
+// fold ((B-A)+A) -> B
+ def BMinusAPlusA : GICombineRule<
+   (defs root:$root),
+   (match (G_SUB $sub, $B, $A),
+          (G_ADD $root, $sub, $A)),
+   (apply (GIReplaceReg $root, $B))>;
+
+// fold ((A-B)+(C-A)) -> (C-B)
+def AMinusBPlusCMinusA : GICombineRule<
+   (defs root:$root),
+   (match (G_SUB $sub1, $A, $B),
+          (G_SUB $sub2, $C, $A),
+          (G_ADD $root, $sub1, $sub2)),
+   (apply (G_SUB $root, $C, $B))>;
+
+// fold ((A-B)+(B-C)) -> (A-C)
+def AMinusBPlusBMinusC : GICombineRule<
+   (defs root:$root),
+   (match (G_SUB $sub1, $A, $B),
+          (G_SUB $sub2, $B, $C),
+          (G_ADD $root, $sub1, $sub2)),
+   (apply (G_SUB $root, $A, $C))>;
+
+// fold (A+(B-(A+C))) to (B-C)
+def APlusBMinusAplusC : GICombineRule<
+   (defs root:$root),
+   (match (G_ADD $add1, $A, $C),
+          (G_SUB $sub1, $B, $add1),
+          (G_ADD $root, $A, $sub1)),
+   (apply (G_SUB $root, $B, $C))>;
+
+// fold (A+(B-(C+A))) to (B-C)
+def APlusBMinusCPlusA : GICombineRule<
+   (defs root:$root),
+   (match (G_ADD $add1, $C, $A),
+          (G_SUB $sub1, $B, $add1),
+          (G_ADD $root, $A, $sub1)),
+   (apply (G_SUB $root, $B, $C))>;
+
+def integer_reassoc_combines: GICombineGroup<[
+  ZeroMinusAPlusB,
+  APlusZeroMinusB,
+  APlusBMinusB,
+  BMinusAPlusA,
+  AMinusBPlusCMinusA,
+  AMinusBPlusBMinusC,
+  APlusBMinusAplusC,
+  APlusBMinusCPlusA
+]>;
+
 // FIXME: These should use the custom predicate feature once it lands.
 def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                      undef_to_negative_one,
@@ -1698,7 +1770,8 @@ def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
 def constant_fold_binops : GICombineGroup<[constant_fold_binop,
                                            constant_fold_fp_binop]>;
 
-def all_combines : GICombineGroup<[trivial_combines, vector_ops_combines,
+def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
+    vector_ops_combines,
     insert_vec_elt_combines, extract_vec_elt_combines, combines_for_extload,
     combine_extracted_vector_load,
     undef_combines, identity_combines, phi_combines,

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
new file mode 100644
index 0000000000000..be33f9f7b284b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
@@ -0,0 +1,252 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner %s -o - | FileCheck %s
+
+
+---
+name:   ZeroMinusAPlusB
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: ZeroMinusAPlusB
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s32) = COPY $w0
+    ; CHECK-NEXT: %b:_(s32) = COPY $w0
+    ; CHECK-NEXT: %add:_(s32) = G_SUB %b, %a
+    ; CHECK-NEXT: $w0 = COPY %add(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %x:_(s32) = COPY $w0
+    %a:_(s32) = COPY $w0
+    %b:_(s32) = COPY $w0
+    %zero:_(s32) = G_CONSTANT i32 0
+    %sub:_(s32) = G_SUB %zero, %a
+    %add:_(s32) = G_ADD %sub, %b
+    $w0 = COPY %add
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:   ZeroMinusAPlusB_multi_use
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: ZeroMinusAPlusB_multi_use
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s32) = COPY $w0
+    ; CHECK-NEXT: %b:_(s32) = COPY $w0
+    ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: %sub:_(s32) = G_SUB %zero, %a
+    ; CHECK-NEXT: %add:_(s32) = G_SUB %b, %a
+    ; CHECK-NEXT: $w0 = COPY %add(s32)
+    ; CHECK-NEXT: $w0 = COPY %sub(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %x:_(s32) = COPY $w0
+    %a:_(s32) = COPY $w0
+    %b:_(s32) = COPY $w0
+    %zero:_(s32) = G_CONSTANT i32 0
+    %sub:_(s32) = G_SUB %zero, %a
+    %add:_(s32) = G_ADD %sub, %b
+    $w0 = COPY %add
+    $w0 = COPY %sub
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:   APlusZeroMiunusB
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: APlusZeroMiunusB
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s64) = COPY $x1
+    ; CHECK-NEXT: %b:_(s64) = COPY $x2
+    ; CHECK-NEXT: %add:_(s64) = G_SUB %a, %b
+    ; CHECK-NEXT: $x0 = COPY %add(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %x:_(s64) = COPY $x0
+    %a:_(s64) = COPY $x1
+    %b:_(s64) = COPY $x2
+    %zero:_(s64) = G_CONSTANT i64 0
+    %sub:_(s64) = G_SUB %zero, %b
+    %add:_(s64) = G_ADD %a, %sub
+    $x0 = COPY %add
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:   APlusBMinusB
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: APlusBMinusB
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: $x0 = COPY %b(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %zero:_(s64) = G_CONSTANT i64 0
+    %sub:_(s64) = G_SUB %b, %a
+    %add:_(s64) = G_ADD %a, %sub
+    $x0 = COPY %add
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:   BMinusAPlusA
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: BMinusAPlusA
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: $x0 = COPY %b(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %zero:_(s64) = G_CONSTANT i64 0
+    %sub:_(s64) = G_SUB %b, %a
+    %add:_(s64) = G_ADD %sub, %a
+    $x0 = COPY %add
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:   AMinusBPlusCMinusA
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: AMinusBPlusCMinusA
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %add:_(s64) = G_SUB %c, %b
+    ; CHECK-NEXT: $x0 = COPY %add(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %zero:_(s64) = G_CONSTANT i64 0
+    %sub2:_(s64) = G_SUB %c, %a
+    %sub1:_(s64) = G_SUB %a, %b
+    %add:_(s64) = G_ADD %sub1, %sub2
+    $x0 = COPY %add
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:   AMinusBPlusBMinusC
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: AMinusBPlusBMinusC
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s64) = COPY $x0
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %add:_(s64) = G_SUB %a, %c
+    ; CHECK-NEXT: $x0 = COPY %add(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %zero:_(s64) = G_CONSTANT i64 0
+    %sub2:_(s64) = G_SUB %b, %c
+    %sub1:_(s64) = G_SUB %a, %b
+    %add:_(s64) = G_ADD %sub1, %sub2
+    $x0 = COPY %add
+    RET_ReallyLR implicit $x0
+
+
+...
+---
+name:   APlusBMinusAplusC
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: APlusBMinusAplusC
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %add:_(s64) = G_SUB %b, %c
+    ; CHECK-NEXT: $x0 = COPY %add(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %zero:_(s64) = G_CONSTANT i64 0
+    %add1:_(s64) = G_ADD %a, %c
+    %sub1:_(s64) = G_SUB %b, %add1
+    %add:_(s64) = G_ADD %a, %sub1
+    $x0 = COPY %add
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:   APlusBMinusCPlusA
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: APlusBMinusCPlusA
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %add:_(s64) = G_SUB %b, %c
+    ; CHECK-NEXT: $x0 = COPY %add(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %zero:_(s64) = G_CONSTANT i64 0
+    %add1:_(s64) = G_ADD %c, %a
+    %sub1:_(s64) = G_SUB %b, %add1
+    %add:_(s64) = G_ADD %a, %sub1
+    $x0 = COPY %add
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:   APlusBMinusCPlusA_BV
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+
+    ; CHECK-LABEL: name: APlusBMinusCPlusA_BV
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a1:_(s64) = COPY $x0
+    ; CHECK-NEXT: %b1:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c1:_(s64) = COPY $x2
+    ; CHECK-NEXT: %b:_(<2 x s64>) = G_BUILD_VECTOR %b1(s64), %ba:_(s64)
+    ; CHECK-NEXT: %c:_(<2 x s64>) = G_BUILD_VECTOR %a1(s64), %c1(s64)
+    ; CHECK-NEXT: %add:_(<2 x s64>) = G_SUB %b, %c
+    ; CHECK-NEXT: $q0 = COPY %add(<2 x s64>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a1:_(s64) = COPY $x0
+    %b1:_(s64) = COPY $x1
+    %c1:_(s64) = COPY $x2
+    %a:_(<2 x s64>) = G_BUILD_VECTOR %a1:_(s64), %b1:_(s64)
+    %b:_(<2 x s64>) = G_BUILD_VECTOR %b1:_(s64), %ba:_(s64)
+    %c:_(<2 x s64>) = G_BUILD_VECTOR %a1:_(s64), %c1:_(s64)
+    %zero:_(s64) = G_CONSTANT i64 0
+    %add1:_(<2 x s64>) = G_ADD %c, %a
+    %sub1:_(<2 x s64>) = G_SUB %b, %add1
+    %add:_(<2 x s64>) = G_ADD %a, %sub1
+    $q0 = COPY %add
+    RET_ReallyLR implicit $x0

More information about the llvm-commits mailing list