[llvm] [GISel][CombinerHelper] Combine and(trunc(x), trunc(y)) -> trunc(and(x, y)) (PR #89023)

Dhruv Chawla via llvm-commits llvm-commits at lists.llvm.org
Mon May 13 23:13:03 PDT 2024


================
@@ -0,0 +1,327 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - -mtriple=aarch64-unknown-unknown -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs  %s | FileCheck %s
+
+# Truncs with a single use get folded.
+
+# and(trunc(x), trunc(y)) -> trunc(and(x, y))
+---
+name:            and_trunc
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+    ; CHECK-LABEL: name: and_trunc
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_AND %2, %3
+    %5:_(s32) = G_ANYEXT %4
+    $w0 = COPY %5
+...
+---
+name:            and_trunc_vector
+body:             |
+  bb.0:
+    liveins: $q0, $q1
+    ; CHECK-LABEL: name: and_trunc_vector
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[AND]](<4 x s32>)
+    ; CHECK-NEXT: $x0 = COPY [[TRUNC]](<4 x s16>)
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %2:_(<4 x s16>) = G_TRUNC %0
+    %3:_(<4 x s16>) = G_TRUNC %1
+    %4:_(<4 x s16>) = G_AND %2, %3
+    $x0 = COPY %4
+...
+
+# or(trunc(x), trunc(y)) -> trunc(or(x, y))
+---
+name:            or_trunc
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+    ; CHECK-LABEL: name: or_trunc
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_OR %2, %3
+    %5:_(s32) = G_ANYEXT %4
+    $w0 = COPY %5
+...
+---
+name:            or_trunc_vector
+body:             |
+  bb.0:
+    liveins: $q0, $q1
+    ; CHECK-LABEL: name: or_trunc_vector
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[OR]](<4 x s32>)
+    ; CHECK-NEXT: $x0 = COPY [[TRUNC]](<4 x s16>)
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %2:_(<4 x s16>) = G_TRUNC %0
+    %3:_(<4 x s16>) = G_TRUNC %1
+    %4:_(<4 x s16>) = G_OR %2, %3
+    $x0 = COPY %4
+...
+
+# xor(trunc(x), trunc(y)) -> trunc(xor(x, y))
+---
+name:            xor_trunc
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+    ; CHECK-LABEL: name: xor_trunc
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $w0 = COPY [[XOR]](s32)
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_XOR %2, %3
+    %5:_(s32) = G_ANYEXT %4
+    $w0 = COPY %5
+...
+---
+name:            xor_trunc_vector
+body:             |
+  bb.0:
+    liveins: $q0, $q1
+    ; CHECK-LABEL: name: xor_trunc_vector
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s32>) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[XOR]](<4 x s32>)
+    ; CHECK-NEXT: $x0 = COPY [[TRUNC]](<4 x s16>)
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %2:_(<4 x s16>) = G_TRUNC %0
+    %3:_(<4 x s16>) = G_TRUNC %1
+    %4:_(<4 x s16>) = G_XOR %2, %3
+    $x0 = COPY %4
+...
+
+# Truncs with multiple uses do not get folded.
+---
+name:            or_trunc_multiuse_1
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $x2
+    ; CHECK-LABEL: name: or_trunc_multiuse_1
+    ; CHECK: liveins: $w0, $w1, $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: G_STORE [[TRUNC]](s16), [[COPY2]](p0) :: (store (s16))
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %5:_(p0) = COPY $x2
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    G_STORE %2, %5 :: (store (s16))
+    %4:_(s16) = G_OR %2, %3
+    %6:_(s32) = G_ANYEXT %4
+    $w0 = COPY %6
+...
+---
+name:            and_trunc_multiuse_2
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $x2
+    ; CHECK-LABEL: name: and_trunc_multiuse_2
+    ; CHECK: liveins: $w0, $w1, $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: G_STORE [[TRUNC]](s16), [[COPY2]](p0) :: (store (s16))
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %5:_(p0) = COPY $x2
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    G_STORE %2, %5 :: (store (s16))
+    %4:_(s16) = G_AND %2, %3
+    %6:_(s32) = G_ANYEXT %4
+    $w0 = COPY %6
+...
+---
+name:            xor_trunc_vector_multiuse
+body:             |
+  bb.0:
+    liveins: $q0, $q1, $x2
+    ; CHECK-LABEL: name: xor_trunc_vector_multiuse
+    ; CHECK: liveins: $q0, $q1, $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[COPY1]](<4 x s32>)
+    ; CHECK-NEXT: G_STORE [[TRUNC]](<4 x s16>), [[COPY2]](p0) :: (store (<4 x s16>))
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s16>) = G_XOR [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: $x0 = COPY [[XOR]](<4 x s16>)
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %5:_(p0) = COPY $x2
+    %2:_(<4 x s16>) = G_TRUNC %0
+    %3:_(<4 x s16>) = G_TRUNC %1
+    G_STORE %2, %5 :: (store (<4 x s16>))
+    %4:_(<4 x s16>) = G_XOR %2, %3
+    $x0 = COPY %4
+...
+
+# Freezes should get pushed through truncs.
----------------
dc03-work wrote:

These folds will trigger once #90618 lands.

https://github.com/llvm/llvm-project/pull/89023
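
For context, the general shape of a combine exercised by these tests -- logic_op(trunc(x), trunc(y)) -> trunc(logic_op(x, y)), gated on each trunc having a single user -- can be sketched as a GlobalISel match/apply pair like the one below. This is an illustrative sketch only, not the code added by this PR: the function names (matchLogicOfTruncs, applyLogicOfTruncs) and the MatchInfo layout are hypothetical; it only assumes the stock MachineRegisterInfo / MachineIRBuilder APIs.

// Illustrative sketch only; names are hypothetical and this is not the PR's code.
// Shape of a logic_op(trunc(x), trunc(y)) -> trunc(logic_op(x, y)) combine,
// gated on each G_TRUNC having a single non-debug user, as the *_multiuse
// tests above require.
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include <utility>
using namespace llvm;

// Match step: succeed only when both operands are single-use truncs of
// same-typed wide values; record the wide source registers in MatchInfo.
static bool matchLogicOfTruncs(MachineInstr &MI, MachineRegisterInfo &MRI,
                               std::pair<Register, Register> &MatchInfo) {
  unsigned Opc = MI.getOpcode();
  if (Opc != TargetOpcode::G_AND && Opc != TargetOpcode::G_OR &&
      Opc != TargetOpcode::G_XOR)
    return false;
  MachineInstr *T0 = MRI.getVRegDef(MI.getOperand(1).getReg());
  MachineInstr *T1 = MRI.getVRegDef(MI.getOperand(2).getReg());
  if (!T0 || !T1 || T0->getOpcode() != TargetOpcode::G_TRUNC ||
      T1->getOpcode() != TargetOpcode::G_TRUNC)
    return false;
  // Do not fold if either trunc has other users: it would stay live anyway,
  // so widening the logic op saves nothing.
  if (!MRI.hasOneNonDBGUse(T0->getOperand(0).getReg()) ||
      !MRI.hasOneNonDBGUse(T1->getOperand(0).getReg()))
    return false;
  Register X = T0->getOperand(1).getReg();
  Register Y = T1->getOperand(1).getReg();
  // Both wide sources must have the same type for the widened op.
  if (MRI.getType(X) != MRI.getType(Y))
    return false;
  MatchInfo = {X, Y};
  return true;
}

// Apply step: build the same logic op on the wide sources and truncate the
// result into the original destination register.
static void applyLogicOfTruncs(MachineInstr &MI, MachineIRBuilder &B,
                               std::pair<Register, Register> &MatchInfo) {
  auto [X, Y] = MatchInfo;
  B.setInstrAndDebugLoc(MI);
  LLT WideTy = B.getMRI()->getType(X);
  auto WideOp = B.buildInstr(MI.getOpcode(), {WideTy}, {X, Y});
  B.buildTrunc(MI.getOperand(0).getReg(), WideOp);
  MI.eraseFromParent();
}

The single-use check is what keeps the multi-use tests above (or_trunc_multiuse_1, and_trunc_multiuse_2, xor_trunc_vector_multiuse) from being rewritten, since their truncated values also feed a G_STORE.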

