[llvm] [GISel][CombinerHelper] Combine and(trunc(x), trunc(y)) -> trunc(and(x, y)) (PR #89023)

Dhruv Chawla via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 17 21:52:21 PDT 2024


https://github.com/dc03-work updated https://github.com/llvm/llvm-project/pull/89023

From 358016b0cffe077a0d83abb701e043f518eed971 Mon Sep 17 00:00:00 2001
From: Dhruv Chawla <dhruvc at nvidia.com>
Date: Wed, 10 Apr 2024 17:24:30 +0530
Subject: [PATCH 1/4] [GISel] Tests for G_AND fold

---
 .../AArch64/GlobalISel/combine-and-trunc.mir  | 177 ++++++++++++++++++
 1 file changed, 177 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir
new file mode 100644
index 00000000000000..fff51f7191ec98
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir
@@ -0,0 +1,177 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - -mtriple=aarch64-unknown-unknown -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs  %s | FileCheck %s
+
+---
+name:            and_trunc
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+    ; CHECK-LABEL: name: and_trunc
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_AND %2, %3
+    %5:_(s32) = G_ANYEXT %4
+    $w0 = COPY %5
+...
+---
+name:            and_trunc_multiuse_1
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $x2
+    ; CHECK-LABEL: name: and_trunc_multiuse_1
+    ; CHECK: liveins: $w0, $w1, $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: G_STORE [[TRUNC]](s16), [[COPY2]](p0) :: (store (s16))
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %5:_(p0) = COPY $x2
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    G_STORE %2, %5 :: (store (s16))
+    %4:_(s16) = G_AND %2, %3
+    %6:_(s32) = G_ANYEXT %4
+    $w0 = COPY %6
+...
+---
+name:            and_trunc_multiuse_2
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $x2
+    ; CHECK-LABEL: name: and_trunc_multiuse_2
+    ; CHECK: liveins: $w0, $w1, $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: G_STORE [[TRUNC]](s16), [[COPY2]](p0) :: (store (s16))
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %5:_(p0) = COPY $x2
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    G_STORE %2, %5 :: (store (s16))
+    %4:_(s16) = G_AND %2, %3
+    %6:_(s32) = G_ANYEXT %4
+    $w0 = COPY %6
+...
+---
+name:            and_trunc_vector
+body:             |
+  bb.0:
+    liveins: $q0, $q1
+    ; CHECK-LABEL: name: and_trunc_vector
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[COPY1]](<4 x s32>)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: $x0 = COPY [[AND]](<4 x s16>)
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %2:_(<4 x s16>) = G_TRUNC %0
+    %3:_(<4 x s16>) = G_TRUNC %1
+    %4:_(<4 x s16>) = G_AND %2, %3
+    $x0 = COPY %4
+...
+---
+name:            and_trunc_vector_multiuse
+body:             |
+  bb.0:
+    liveins: $q0, $q1, $x0
+    ; CHECK-LABEL: name: and_trunc_vector_multiuse
+    ; CHECK: liveins: $q0, $q1, $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[COPY1]](<4 x s32>)
+    ; CHECK-NEXT: G_STORE [[TRUNC]](<4 x s16>), [[COPY2]](p0) :: (store (<4 x s16>))
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: $x0 = COPY [[AND]](<4 x s16>)
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %5:_(p0) = COPY $x2
+    %2:_(<4 x s16>) = G_TRUNC %0
+    %3:_(<4 x s16>) = G_TRUNC %1
+    G_STORE %2, %5 :: (store (<4 x s16>))
+    %4:_(<4 x s16>) = G_AND %2, %3
+    $x0 = COPY %4
+...
+---
+name:            and_trunc_freeze
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+    ; CHECK-LABEL: name: and_trunc_freeze
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s16) = G_FREEZE [[TRUNC]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[FREEZE]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %6:_(s16) = G_FREEZE %2
+    %4:_(s16) = G_AND %6, %3
+    %5:_(s32) = G_ANYEXT %4
+    $w0 = COPY %5
+...
+---
+name:            and_trunc_freeze_both
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+    ; CHECK-LABEL: name: and_trunc_freeze_both
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s16) = G_FREEZE [[TRUNC]]
+    ; CHECK-NEXT: [[FREEZE1:%[0-9]+]]:_(s16) = G_FREEZE [[TRUNC1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[FREEZE]], [[FREEZE1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %6:_(s16) = G_FREEZE %2
+    %7:_(s16) = G_FREEZE %3
+    %4:_(s16) = G_AND %6, %7
+    %5:_(s32) = G_ANYEXT %4
+    $w0 = COPY %5

From 46f65d2060a777d617068de5b59e14a817d603a9 Mon Sep 17 00:00:00 2001
From: Dhruv Chawla <dhruvc at nvidia.com>
Date: Wed, 10 Apr 2024 14:35:11 +0530
Subject: [PATCH 2/4] [GISel][CombinerHelper] Combine and(trunc(x), trunc(y))
 -> trunc(and(x, y))

The "match_ands" pattern is also enabled in the
AArch64PostLegalizerCombiner.
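
For illustration, the basic shape of the fold (a before/after MIR sketch
based on the and_trunc tests added in this series; register numbers are
illustrative, not the exact autogenerated check lines):

  ; Before
  %2:_(s16) = G_TRUNC %0(s32)
  %3:_(s16) = G_TRUNC %1(s32)
  %4:_(s16) = G_AND %2, %3

  ; After
  %5:_(s32) = G_AND %0, %1
  %4:_(s16) = G_TRUNC %5(s32)

On the scalar tests the remaining trunc/anyext pair is then cleaned up by
existing combines, so those checks end with a single s32 G_AND.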
---
 .../llvm/CodeGen/GlobalISel/CombinerHelper.h  |  3 +
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 60 +++++++++++++++++++
 llvm/lib/Target/AArch64/AArch64Combine.td     |  2 +-
 .../AArch64/GlobalISel/combine-and-trunc.mir  | 34 ++++-------
 .../AArch64/GlobalISel/combine-select.mir     | 17 +++---
 ...izer-combiner-narrow-binop-feeding-add.mir |  7 +--
 6 files changed, 87 insertions(+), 36 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 3af32043391fec..0148911ae1ecc6 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -963,6 +963,9 @@ class CombinerHelper {
 
   // Simplify (cmp cc0 x, y) (&& or ||) (cmp cc1 x, y) -> cmp cc2 x, y.
   bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo);
+
+  // Simplify (trunc v1) && (trunc v2) -> trunc (v1 && v2)
+  bool tryFoldAndOfTruncs(GLogicalBinOp *Logical, BuildFnTy &MatchInfo);
 };
 } // namespace llvm
 
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 3829c33369b275..6933589d713fe6 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -6982,9 +6982,69 @@ bool CombinerHelper::tryFoldLogicOfFCmps(GLogicalBinOp *Logic,
   return false;
 }
 
+bool CombinerHelper::tryFoldAndOfTruncs(GLogicalBinOp *Logical,
+                                        BuildFnTy &MatchInfo) {
+  assert(Logical->getOpcode() == TargetOpcode::G_AND &&
+         "Expected to be called with G_AND!");
+  Register Dst = Logical->getOperand(0).getReg();
+  Register V1 = Logical->getOperand(1).getReg();
+  Register V2 = Logical->getOperand(2).getReg();
+
+  MachineInstr *V1MI = MRI.getUniqueVRegDef(V1);
+  MachineInstr *V2MI = MRI.getUniqueVRegDef(V2);
+  if (!V1MI || !V2MI)
+    return false;
+
+  bool V1Freeze = V1MI->getOpcode() == TargetOpcode::G_FREEZE;
+  bool V2Freeze = V2MI->getOpcode() == TargetOpcode::G_FREEZE;
+  if (V1Freeze)
+    V1 = V1MI->getOperand(1).getReg();
+  if (V2Freeze)
+    V2 = V2MI->getOperand(1).getReg();
+
+  Register V1Src, V2Src;
+  if (!mi_match(V1, MRI, m_GTrunc(m_Reg(V1Src))) ||
+      !mi_match(V2, MRI, m_GTrunc(m_Reg(V2Src))))
+    return false;
+  if (!MRI.hasOneNonDBGUse(V1) || !MRI.hasOneNonDBGUse(V2))
+    return false;
+
+  LLT V1Ty = MRI.getType(V1);
+  LLT V2Ty = MRI.getType(V2);
+  LLT V1SrcTy = MRI.getType(V1Src);
+  LLT V2SrcTy = MRI.getType(V2Src);
+
+  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_AND, {V1SrcTy, V2SrcTy}}))
+    return false;
+
+  if (V1Ty != V2Ty || V1SrcTy != V2SrcTy)
+    return false;
+
+  MatchInfo = [=](MachineIRBuilder &B) {
+    Register Op0 = V1Src;
+    Register Op1 = V2Src;
+
+    if (V1Freeze)
+      Op0 = B.buildFreeze(V1SrcTy, V1Src).getReg(0);
+    if (V2Freeze)
+      Op1 = B.buildFreeze(V1SrcTy, V2Src).getReg(0);
+
+    auto And = B.buildAnd(V1SrcTy, Op0, Op1);
+    B.buildTrunc(Dst, And);
+
+    MRI.getUniqueVRegDef(V1)->eraseFromParent();
+    MRI.getUniqueVRegDef(V2)->eraseFromParent();
+  };
+
+  return true;
+}
+
 bool CombinerHelper::matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) {
   GAnd *And = cast<GAnd>(&MI);
 
+  if (tryFoldAndOfTruncs(And, MatchInfo))
+    return true;
+
   if (tryFoldAndOrOrICmpsUsingRanges(And, MatchInfo))
     return true;
 
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index 10cad6d1924407..eda7d925dade47 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -295,5 +295,5 @@ def AArch64PostLegalizerCombiner
                         ptr_add_immed_chain, overlapping_and,
                         split_store_zero_128, undef_combines,
                         select_to_minmax, or_to_bsp, combine_concat_vector,
-                        commute_constant_to_rhs]> {
+                        commute_constant_to_rhs, match_ands]> {
 }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir
index fff51f7191ec98..7f664850885836 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir
@@ -11,11 +11,8 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[TRUNC1]]
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
-    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %2:_(s16) = G_TRUNC %0
@@ -88,10 +85,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[COPY]](<4 x s32>)
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[COPY1]](<4 x s32>)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC]], [[TRUNC1]]
-    ; CHECK-NEXT: $x0 = COPY [[AND]](<4 x s16>)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[AND]](<4 x s32>)
+    ; CHECK-NEXT: $x0 = COPY [[TRUNC]](<4 x s16>)
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %2:_(<4 x s16>) = G_TRUNC %0
@@ -134,12 +130,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s16) = G_FREEZE [[TRUNC]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[FREEZE]], [[TRUNC1]]
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
-    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FREEZE]], [[COPY1]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %2:_(s16) = G_TRUNC %0
@@ -159,13 +152,10 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s16) = G_FREEZE [[TRUNC]]
-    ; CHECK-NEXT: [[FREEZE1:%[0-9]+]]:_(s16) = G_FREEZE [[TRUNC1]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[FREEZE]], [[FREEZE1]]
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
-    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
+    ; CHECK-NEXT: [[FREEZE1:%[0-9]+]]:_(s32) = G_FREEZE [[COPY1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FREEZE]], [[FREEZE1]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %2:_(s16) = G_TRUNC %0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
index 2bf7e84a379ba0..d4bca5dacf0a2b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
@@ -1,7 +1,8 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs -mtriple aarch64-unknown- --aarch64postlegalizercombiner-only-enable-rule="select_to_logical" %s -o - | FileCheck %s
+# RUN: llc -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs -mtriple aarch64-unknown-unknown %s -o - | FileCheck %s
 # RUN: llc -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs -mtriple aarch64-unknown-unknown %s -o - | FileCheck %s
 # REQUIRES: asserts
+
 ---
 # select (c, x, x) -> x
 name:            test_combine_select_same_res
@@ -200,10 +201,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK-NEXT: %c:_(s1) = G_TRUNC [[COPY]](s64)
-    ; CHECK-NEXT: %t:_(s1) = G_TRUNC [[COPY1]](s64)
-    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE %t
-    ; CHECK-NEXT: %sel:_(s1) = G_AND %c, [[FREEZE]]
+    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[FREEZE]]
+    ; CHECK-NEXT: %sel:_(s1) = G_TRUNC [[AND]](s64)
     ; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
     ; CHECK-NEXT: $w0 = COPY %ext(s32)
     %0:_(s64) = COPY $x0
@@ -228,10 +228,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK-NEXT: %c:_(s1) = G_TRUNC [[COPY]](s64)
-    ; CHECK-NEXT: %t:_(s1) = G_TRUNC [[COPY1]](s64)
-    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE %t
-    ; CHECK-NEXT: %sel:_(s1) = G_AND %c, [[FREEZE]]
+    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[FREEZE]]
+    ; CHECK-NEXT: %sel:_(s1) = G_TRUNC [[AND]](s64)
     ; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
     ; CHECK-NEXT: $w0 = COPY %ext(s32)
     %0:_(s64) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir
index fb19cda303d365..b2a9a802261252 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir
@@ -84,10 +84,9 @@ body:             |
     ; CHECK: liveins: $x0, $x1
     ; CHECK: %binop_lhs:_(s64) = COPY $x0
     ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[AND]](s32)
+    ; CHECK: %binop:_(s64) = G_AND %binop_lhs, %binop_rhs
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop(s64)
+    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s32)
     ; CHECK: $x0 = COPY [[ZEXT]](s64)
     ; CHECK: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0

From 820cbf71b37599605bd3a7c55131da2a2e83fadf Mon Sep 17 00:00:00 2001
From: Dhruv Chawla <dhruvc at nvidia.com>
Date: Wed, 17 Apr 2024 16:43:22 +0530
Subject: [PATCH 3/4] Refactor the code

- Move logic to matchHoistLogicOpWithSameOpcodeHands
- Add the ability for matchHoistLogicOpWithSameOpcodeHands to look through
  freezes (see the sketch below)
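
For the freeze handling, the intended shape of the transform (a sketch based
on the and_trunc_freeze test; register numbers are illustrative):

  ; Before
  %2:_(s16) = G_TRUNC %0(s32)
  %3:_(s16) = G_TRUNC %1(s32)
  %6:_(s16) = G_FREEZE %2
  %4:_(s16) = G_AND %6, %3

  ; After
  %7:_(s32) = G_FREEZE %0
  %8:_(s32) = G_AND %7, %1
  %4:_(s16) = G_TRUNC %8(s32)

i.e. the freeze is rebuilt on the wide source value, so the hoisted G_AND
still only sees frozen operands.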
---
 .../llvm/CodeGen/GlobalISel/CombinerHelper.h  |  3 -
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 89 ++++++-------------
 llvm/lib/Target/AArch64/AArch64Combine.td     |  2 +-
 .../AArch64/GlobalISel/combine-and-trunc.mir  |  4 +-
 .../AArch64/GlobalISel/combine-select.mir     | 27 +++---
 ...izer-combiner-narrow-binop-feeding-add.mir | 11 ++-
 .../prelegalizercombiner-hoist-same-hands.mir |  7 +-
 7 files changed, 48 insertions(+), 95 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 0148911ae1ecc6..3af32043391fec 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -963,9 +963,6 @@ class CombinerHelper {
 
   // Simplify (cmp cc0 x, y) (&& or ||) (cmp cc1 x, y) -> cmp cc2 x, y.
   bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo);
-
-  // Simplify (trunc v1) && (trunc v2) -> trunc (v1 && v2)
-  bool tryFoldAndOfTruncs(GLogicalBinOp *Logical, BuildFnTy &MatchInfo);
 };
 } // namespace llvm
 
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 6933589d713fe6..5c5bde82cc544c 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2983,6 +2983,17 @@ bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
   MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
   if (!LeftHandInst || !RightHandInst)
     return false;
+
+  // Look through freeze()s.
+  bool LeftHandFreeze = LeftHandInst->getOpcode() == TargetOpcode::G_FREEZE;
+  bool RightHandFreeze = RightHandInst->getOpcode() == TargetOpcode::G_FREEZE;
+  if (LeftHandFreeze)
+    LeftHandInst =
+        getDefIgnoringCopies(LeftHandInst->getOperand(1).getReg(), MRI);
+  if (RightHandFreeze)
+    RightHandInst =
+        getDefIgnoringCopies(RightHandInst->getOperand(1).getReg(), MRI);
+
   unsigned HandOpcode = LeftHandInst->getOpcode();
   if (HandOpcode != RightHandInst->getOpcode())
     return false;
@@ -3006,8 +3017,10 @@ bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
     return false;
   case TargetOpcode::G_ANYEXT:
   case TargetOpcode::G_SEXT:
-  case TargetOpcode::G_ZEXT: {
+  case TargetOpcode::G_ZEXT:
+  case TargetOpcode::G_TRUNC: {
     // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
+    // Match: logic (trunc X), (trunc Y) -> trunc (logic X, Y)
     break;
   }
   case TargetOpcode::G_AND:
@@ -3032,8 +3045,18 @@ bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
   auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
   OperandBuildSteps LogicBuildSteps = {
       [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
-      [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
-      [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
+      [=](MachineInstrBuilder &MIB) mutable {
+        // freeze (hand (x, ...)) -> freeze(x)
+        if (LeftHandFreeze)
+          X = Builder.buildFreeze(XTy, X).getReg(0);
+        MIB.addReg(X);
+      },
+      [=](MachineInstrBuilder &MIB) mutable {
+        // freeze (hand (y, ...)) -> freeze(y)
+        if (RightHandFreeze)
+          Y = Builder.buildFreeze(YTy, Y).getReg(0);
+        MIB.addReg(Y);
+      }};
   InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
 
   // Steps to build hand (logic x, y), ...z
@@ -6982,69 +7005,9 @@ bool CombinerHelper::tryFoldLogicOfFCmps(GLogicalBinOp *Logic,
   return false;
 }
 
-bool CombinerHelper::tryFoldAndOfTruncs(GLogicalBinOp *Logical,
-                                        BuildFnTy &MatchInfo) {
-  assert(Logical->getOpcode() == TargetOpcode::G_AND &&
-         "Expected to be called with G_AND!");
-  Register Dst = Logical->getOperand(0).getReg();
-  Register V1 = Logical->getOperand(1).getReg();
-  Register V2 = Logical->getOperand(2).getReg();
-
-  MachineInstr *V1MI = MRI.getUniqueVRegDef(V1);
-  MachineInstr *V2MI = MRI.getUniqueVRegDef(V2);
-  if (!V1MI || !V2MI)
-    return false;
-
-  bool V1Freeze = V1MI->getOpcode() == TargetOpcode::G_FREEZE;
-  bool V2Freeze = V2MI->getOpcode() == TargetOpcode::G_FREEZE;
-  if (V1Freeze)
-    V1 = V1MI->getOperand(1).getReg();
-  if (V2Freeze)
-    V2 = V2MI->getOperand(1).getReg();
-
-  Register V1Src, V2Src;
-  if (!mi_match(V1, MRI, m_GTrunc(m_Reg(V1Src))) ||
-      !mi_match(V2, MRI, m_GTrunc(m_Reg(V2Src))))
-    return false;
-  if (!MRI.hasOneNonDBGUse(V1) || !MRI.hasOneNonDBGUse(V2))
-    return false;
-
-  LLT V1Ty = MRI.getType(V1);
-  LLT V2Ty = MRI.getType(V2);
-  LLT V1SrcTy = MRI.getType(V1Src);
-  LLT V2SrcTy = MRI.getType(V2Src);
-
-  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_AND, {V1SrcTy, V2SrcTy}}))
-    return false;
-
-  if (V1Ty != V2Ty || V1SrcTy != V2SrcTy)
-    return false;
-
-  MatchInfo = [=](MachineIRBuilder &B) {
-    Register Op0 = V1Src;
-    Register Op1 = V2Src;
-
-    if (V1Freeze)
-      Op0 = B.buildFreeze(V1SrcTy, V1Src).getReg(0);
-    if (V2Freeze)
-      Op1 = B.buildFreeze(V1SrcTy, V2Src).getReg(0);
-
-    auto And = B.buildAnd(V1SrcTy, Op0, Op1);
-    B.buildTrunc(Dst, And);
-
-    MRI.getUniqueVRegDef(V1)->eraseFromParent();
-    MRI.getUniqueVRegDef(V2)->eraseFromParent();
-  };
-
-  return true;
-}
-
 bool CombinerHelper::matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) {
   GAnd *And = cast<GAnd>(&MI);
 
-  if (tryFoldAndOfTruncs(And, MatchInfo))
-    return true;
-
   if (tryFoldAndOrOrICmpsUsingRanges(And, MatchInfo))
     return true;
 
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index eda7d925dade47..10cad6d1924407 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -295,5 +295,5 @@ def AArch64PostLegalizerCombiner
                         ptr_add_immed_chain, overlapping_and,
                         split_store_zero_128, undef_combines,
                         select_to_minmax, or_to_bsp, combine_concat_vector,
-                        commute_constant_to_rhs, match_ands]> {
+                        commute_constant_to_rhs]> {
 }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir
index 7f664850885836..126ea25b45a3ff 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-trunc.mir
@@ -130,8 +130,8 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND %8, [[COPY1]]
     ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FREEZE]], [[COPY1]]
     ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
@@ -152,9 +152,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND %9, %10
     ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
     ; CHECK-NEXT: [[FREEZE1:%[0-9]+]]:_(s32) = G_FREEZE [[COPY1]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FREEZE]], [[FREEZE1]]
     ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
index d4bca5dacf0a2b..43cb540eeca176 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
@@ -117,10 +117,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x2
-    ; CHECK-NEXT: %c:_(s1) = G_TRUNC [[COPY]](s64)
-    ; CHECK-NEXT: %f:_(s1) = G_TRUNC [[COPY1]](s64)
-    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE %f
-    ; CHECK-NEXT: %sel:_(s1) = G_OR %c, [[FREEZE]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], %11
+    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
+    ; CHECK-NEXT: %sel:_(s1) = G_TRUNC [[OR]](s64)
     ; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
     ; CHECK-NEXT: $w0 = COPY %ext(s32)
     %0:_(s64) = COPY $x0
@@ -144,10 +143,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x2
-    ; CHECK-NEXT: %c:_(s1) = G_TRUNC [[COPY]](s64)
-    ; CHECK-NEXT: %f:_(s1) = G_TRUNC [[COPY1]](s64)
-    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE %f
-    ; CHECK-NEXT: %sel:_(s1) = G_OR %c, [[FREEZE]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], %12
+    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
+    ; CHECK-NEXT: %sel:_(s1) = G_TRUNC [[OR]](s64)
     ; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
     ; CHECK-NEXT: $w0 = COPY %ext(s32)
     %0:_(s64) = COPY $x0
@@ -172,12 +170,9 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d2
-    ; CHECK-NEXT: %c:_(<2 x s1>) = G_TRUNC [[COPY]](<2 x s32>)
-    ; CHECK-NEXT: %f:_(<2 x s1>) = G_TRUNC [[COPY1]](<2 x s32>)
-    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<2 x s1>) = G_FREEZE %f
-    ; CHECK-NEXT: %sel:_(<2 x s1>) = G_OR %c, [[FREEZE]]
-    ; CHECK-NEXT: %ext:_(<2 x s32>) = G_ANYEXT %sel(<2 x s1>)
-    ; CHECK-NEXT: $d0 = COPY %ext(<2 x s32>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR [[COPY]], %13
+    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<2 x s32>) = G_FREEZE [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[OR]](<2 x s32>)
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = COPY $d1
     %2:_(<2 x s32>) = COPY $d2
@@ -201,8 +196,8 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], %12
     ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[FREEZE]]
     ; CHECK-NEXT: %sel:_(s1) = G_TRUNC [[AND]](s64)
     ; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
     ; CHECK-NEXT: $w0 = COPY %ext(s32)
@@ -228,8 +223,8 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], %12
     ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[FREEZE]]
     ; CHECK-NEXT: %sel:_(s1) = G_TRUNC [[AND]](s64)
     ; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
     ; CHECK-NEXT: $w0 = COPY %ext(s32)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir
index b2a9a802261252..9699d0cf7892cf 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir
@@ -84,8 +84,8 @@ body:             |
     ; CHECK: liveins: $x0, $x1
     ; CHECK: %binop_lhs:_(s64) = COPY $x0
     ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: %binop:_(s64) = G_AND %binop_lhs, %binop_rhs
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop(s64)
+    ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND %binop_lhs, %binop_rhs
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[AND]](s64)
     ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s32)
     ; CHECK: $x0 = COPY [[ZEXT]](s64)
     ; CHECK: RET_ReallyLR implicit $x0
@@ -130,10 +130,9 @@ body:             |
     ; CHECK: liveins: $x0, $x1
     ; CHECK: %binop_lhs:_(s64) = COPY $x0
     ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
-    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[XOR]](s32)
+    ; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR %binop_lhs, %binop_rhs
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[XOR]](s64)
+    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s32)
     ; CHECK: $x0 = COPY [[ZEXT]](s64)
     ; CHECK: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-hoist-same-hands.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-hoist-same-hands.mir
index 48fc042d7c7375..7f2ae6ee248074 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-hoist-same-hands.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-hoist-same-hands.mir
@@ -268,10 +268,9 @@ body:             |
     ; CHECK: liveins: $w0, $w1
     ; CHECK: %x_wide:_(s32) = COPY $w0
     ; CHECK: %y_wide:_(s32) = COPY $w1
-    ; CHECK: %x:_(s1) = G_TRUNC %x_wide(s32)
-    ; CHECK: %y:_(s1) = G_TRUNC %y_wide(s32)
-    ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR %x, %y
-    ; CHECK: %logic_op:_(s64) = G_SEXT [[OR]](s1)
+    ; CHECK: %8:_(s32) = G_OR %x_wide, %y_wide
+    ; CHECK: %7:_(s1) = G_TRUNC %8(s32)
+    ; CHECK: %logic_op:_(s64) = G_SEXT %7(s1)
     ; CHECK: $x0 = COPY %logic_op(s64)
     ; CHECK: RET_ReallyLR implicit $x0
     %x_wide:_(s32) = COPY $w0

From 41d695a7f3d4afd7b74b30a3e9d10bc944532992 Mon Sep 17 00:00:00 2001
From: Dhruv Chawla <dhruvc at nvidia.com>
Date: Thu, 18 Apr 2024 10:20:47 +0530
Subject: [PATCH 4/4] Update failing AMDGPU tests

---
---
 llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll   | 191 +++++++++--------
 llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll   | 195 +++++++++---------
 .../test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll |  10 +-
 .../test/CodeGen/AMDGPU/GlobalISel/usubsat.ll |  10 +-
 llvm/test/CodeGen/AMDGPU/GlobalISel/xnor.ll   |  20 +-
 5 files changed, 204 insertions(+), 222 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
index f9b98059be0b3a..06930388901b0f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
@@ -1804,113 +1804,110 @@ define i24 @v_fshl_i24(i24 %lhs, i24 %rhs, i24 %amt) {
 define amdgpu_ps i48 @s_fshl_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 inreg %amt.arg) {
 ; GFX6-LABEL: s_fshl_v2i24:
 ; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_cvt_f32_ubyte0_e32 v2, 24
+; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v2
 ; GFX6-NEXT:    s_lshr_b32 s6, s0, 16
-; GFX6-NEXT:    s_lshr_b32 s7, s0, 24
-; GFX6-NEXT:    s_and_b32 s9, s0, 0xff
-; GFX6-NEXT:    s_bfe_u32 s0, s0, 0x80008
-; GFX6-NEXT:    v_cvt_f32_ubyte0_e32 v0, 24
-; GFX6-NEXT:    s_lshl_b32 s0, s0, 8
+; GFX6-NEXT:    s_lshr_b32 s7, s1, 8
+; GFX6-NEXT:    s_bfe_u32 s9, s0, 0x80008
+; GFX6-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
+; GFX6-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GFX6-NEXT:    s_and_b32 s8, s0, 0xff
+; GFX6-NEXT:    s_lshl_b32 s9, s9, 8
 ; GFX6-NEXT:    s_and_b32 s6, s6, 0xff
-; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GFX6-NEXT:    s_or_b32 s0, s9, s0
+; GFX6-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX6-NEXT:    v_mov_b32_e32 v0, s0
+; GFX6-NEXT:    s_and_b32 s0, s7, 0xff
+; GFX6-NEXT:    s_or_b32 s8, s8, s9
 ; GFX6-NEXT:    s_and_b32 s6, 0xffff, s6
-; GFX6-NEXT:    s_lshr_b32 s8, s1, 8
+; GFX6-NEXT:    v_alignbit_b32 v0, s1, v0, 24
 ; GFX6-NEXT:    s_and_b32 s0, 0xffff, s0
+; GFX6-NEXT:    s_and_b32 s8, 0xffff, s8
 ; GFX6-NEXT:    s_lshl_b32 s6, s6, 16
+; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX6-NEXT:    s_lshl_b32 s0, s0, 16
+; GFX6-NEXT:    v_mov_b32_e32 v3, 0xffffffe8
+; GFX6-NEXT:    s_or_b32 s6, s8, s6
+; GFX6-NEXT:    v_or_b32_e32 v0, s0, v0
+; GFX6-NEXT:    s_lshr_b32 s0, s2, 16
+; GFX6-NEXT:    s_lshr_b32 s1, s3, 8
+; GFX6-NEXT:    s_bfe_u32 s8, s2, 0x80008
+; GFX6-NEXT:    v_mul_lo_u32 v3, v2, v3
+; GFX6-NEXT:    s_and_b32 s7, s2, 0xff
+; GFX6-NEXT:    s_lshl_b32 s8, s8, 8
+; GFX6-NEXT:    s_and_b32 s0, s0, 0xff
+; GFX6-NEXT:    s_and_b32 s3, s3, 0xff
+; GFX6-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX6-NEXT:    s_and_b32 s1, s1, 0xff
-; GFX6-NEXT:    s_or_b32 s0, s0, s6
-; GFX6-NEXT:    s_lshl_b32 s1, s1, 8
-; GFX6-NEXT:    s_and_b32 s6, s8, 0xff
-; GFX6-NEXT:    s_or_b32 s1, s7, s1
-; GFX6-NEXT:    s_and_b32 s6, 0xffff, s6
-; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GFX6-NEXT:    s_or_b32 s7, s7, s8
+; GFX6-NEXT:    s_and_b32 s0, 0xffff, s0
+; GFX6-NEXT:    v_alignbit_b32 v1, s3, v1, 24
+; GFX6-NEXT:    s_and_b32 s1, 0xffff, s1
+; GFX6-NEXT:    s_and_b32 s7, 0xffff, s7
+; GFX6-NEXT:    s_lshl_b32 s0, s0, 16
+; GFX6-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_or_b32 s0, s7, s0
+; GFX6-NEXT:    v_or_b32_e32 v1, s1, v1
+; GFX6-NEXT:    s_lshr_b32 s1, s4, 16
+; GFX6-NEXT:    s_bfe_u32 s7, s4, 0x80008
+; GFX6-NEXT:    v_mul_hi_u32 v3, v2, v3
+; GFX6-NEXT:    s_and_b32 s3, s4, 0xff
+; GFX6-NEXT:    s_lshl_b32 s7, s7, 8
+; GFX6-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX6-NEXT:    s_or_b32 s3, s3, s7
 ; GFX6-NEXT:    s_and_b32 s1, 0xffff, s1
-; GFX6-NEXT:    s_lshl_b32 s6, s6, 16
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT:    s_or_b32 s1, s1, s6
-; GFX6-NEXT:    s_lshr_b32 s6, s2, 16
-; GFX6-NEXT:    s_lshr_b32 s7, s2, 24
-; GFX6-NEXT:    s_and_b32 s9, s2, 0xff
-; GFX6-NEXT:    s_bfe_u32 s2, s2, 0x80008
-; GFX6-NEXT:    s_lshl_b32 s2, s2, 8
-; GFX6-NEXT:    s_and_b32 s6, s6, 0xff
-; GFX6-NEXT:    s_or_b32 s2, s9, s2
-; GFX6-NEXT:    s_and_b32 s6, 0xffff, s6
-; GFX6-NEXT:    v_mov_b32_e32 v1, 0xffffffe8
-; GFX6-NEXT:    s_lshr_b32 s8, s3, 8
-; GFX6-NEXT:    s_and_b32 s2, 0xffff, s2
-; GFX6-NEXT:    s_lshl_b32 s6, s6, 16
-; GFX6-NEXT:    s_and_b32 s3, s3, 0xff
-; GFX6-NEXT:    v_mul_lo_u32 v1, v0, v1
-; GFX6-NEXT:    s_or_b32 s2, s2, s6
-; GFX6-NEXT:    s_lshl_b32 s3, s3, 8
-; GFX6-NEXT:    s_and_b32 s6, s8, 0xff
-; GFX6-NEXT:    s_or_b32 s3, s7, s3
-; GFX6-NEXT:    s_and_b32 s6, 0xffff, s6
 ; GFX6-NEXT:    s_and_b32 s3, 0xffff, s3
-; GFX6-NEXT:    s_lshl_b32 s6, s6, 16
-; GFX6-NEXT:    s_or_b32 s3, s3, s6
-; GFX6-NEXT:    s_lshr_b32 s6, s4, 16
-; GFX6-NEXT:    s_lshr_b32 s7, s4, 24
-; GFX6-NEXT:    s_and_b32 s9, s4, 0xff
-; GFX6-NEXT:    s_bfe_u32 s4, s4, 0x80008
-; GFX6-NEXT:    v_mul_hi_u32 v1, v0, v1
-; GFX6-NEXT:    s_lshl_b32 s4, s4, 8
-; GFX6-NEXT:    s_and_b32 s6, s6, 0xff
-; GFX6-NEXT:    s_or_b32 s4, s9, s4
-; GFX6-NEXT:    s_and_b32 s6, 0xffff, s6
-; GFX6-NEXT:    s_and_b32 s4, 0xffff, s4
-; GFX6-NEXT:    s_lshl_b32 s6, s6, 16
-; GFX6-NEXT:    s_or_b32 s4, s4, s6
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT:    v_mul_hi_u32 v1, s4, v0
-; GFX6-NEXT:    s_lshr_b32 s8, s5, 8
-; GFX6-NEXT:    s_and_b32 s5, s5, 0xff
-; GFX6-NEXT:    s_lshl_b32 s5, s5, 8
-; GFX6-NEXT:    v_mul_lo_u32 v1, v1, 24
-; GFX6-NEXT:    s_and_b32 s6, s8, 0xff
-; GFX6-NEXT:    s_or_b32 s5, s7, s5
-; GFX6-NEXT:    s_and_b32 s6, 0xffff, s6
-; GFX6-NEXT:    s_and_b32 s5, 0xffff, s5
-; GFX6-NEXT:    s_lshl_b32 s6, s6, 16
-; GFX6-NEXT:    s_or_b32 s5, s5, s6
-; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s4, v1
-; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, 24, v1
-; GFX6-NEXT:    v_mul_hi_u32 v0, s5, v0
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v1
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, 24, v1
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v1
-; GFX6-NEXT:    v_mul_lo_u32 v0, v0, 24
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, 23, v1
-; GFX6-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
-; GFX6-NEXT:    v_lshl_b32_e32 v1, s0, v1
-; GFX6-NEXT:    s_lshr_b32 s0, s2, 1
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_or_b32 s1, s3, s1
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT:    v_mul_hi_u32 v3, s1, v2
+; GFX6-NEXT:    s_lshr_b32 s2, s5, 8
+; GFX6-NEXT:    s_and_b32 s3, s5, 0xff
+; GFX6-NEXT:    v_mov_b32_e32 v4, s4
+; GFX6-NEXT:    s_and_b32 s2, s2, 0xff
+; GFX6-NEXT:    v_alignbit_b32 v4, s3, v4, 24
+; GFX6-NEXT:    s_and_b32 s2, 0xffff, s2
+; GFX6-NEXT:    v_and_b32_e32 v4, 0xffff, v4
+; GFX6-NEXT:    v_mul_lo_u32 v3, v3, 24
+; GFX6-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX6-NEXT:    v_or_b32_e32 v4, s2, v4
+; GFX6-NEXT:    v_mul_hi_u32 v2, v4, v2
+; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s1, v3
+; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, 24, v3
+; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v3
+; GFX6-NEXT:    v_mul_lo_u32 v2, v2, 24
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, 24, v3
+; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v3
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v4, v2
+; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, 23, v3
+; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, 24, v2
+; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v2
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, 24, v2
+; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v2
+; GFX6-NEXT:    v_and_b32_e32 v3, 0xffffff, v3
+; GFX6-NEXT:    s_lshr_b32 s0, s0, 1
+; GFX6-NEXT:    v_and_b32_e32 v5, 0xffffff, v5
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GFX6-NEXT:    v_lshl_b32_e32 v3, s6, v3
+; GFX6-NEXT:    v_lshr_b32_e32 v5, s0, v5
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 23, v2
 ; GFX6-NEXT:    v_and_b32_e32 v2, 0xffffff, v2
-; GFX6-NEXT:    v_lshr_b32_e32 v2, s0, v2
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s5, v0
+; GFX6-NEXT:    v_or_b32_e32 v3, v3, v5
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, v2, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 1, v1
+; GFX6-NEXT:    v_and_b32_e32 v2, 0xffffff, v4
+; GFX6-NEXT:    v_lshrrev_b32_e32 v1, v2, v1
+; GFX6-NEXT:    v_bfe_u32 v2, v3, 8, 8
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, 0xff, v3
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    v_bfe_u32 v2, v3, 16, 8
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
-; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, 24, v0
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, 24, v0
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, 23, v0
-; GFX6-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
-; GFX6-NEXT:    s_lshr_b32 s0, s3, 1
-; GFX6-NEXT:    v_and_b32_e32 v2, 0xffffff, v2
-; GFX6-NEXT:    v_lshl_b32_e32 v0, s1, v0
-; GFX6-NEXT:    v_lshr_b32_e32 v2, s0, v2
-; GFX6-NEXT:    v_bfe_u32 v3, v1, 8, 8
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v2
-; GFX6-NEXT:    v_and_b32_e32 v2, 0xff, v1
-; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 16, 8
-; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT:    v_or_b32_e32 v1, v2, v1
 ; GFX6-NEXT:    v_and_b32_e32 v2, 0xff, v0
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
index c8455665e7b40f..ff93cddafc8728 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
@@ -1815,113 +1815,110 @@ define i24 @v_fshr_i24(i24 %lhs, i24 %rhs, i24 %amt) {
 define amdgpu_ps i48 @s_fshr_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 inreg %amt.arg) {
 ; GFX6-LABEL: s_fshr_v2i24:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    v_cvt_f32_ubyte0_e32 v0, 24
-; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_f32_ubyte0_e32 v2, 24
+; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v2
+; GFX6-NEXT:    s_lshr_b32 s7, s1, 8
+; GFX6-NEXT:    s_bfe_u32 s9, s0, 0x80008
+; GFX6-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX6-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
+; GFX6-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GFX6-NEXT:    v_mov_b32_e32 v0, s0
+; GFX6-NEXT:    v_mov_b32_e32 v3, 0xffffffe8
 ; GFX6-NEXT:    s_lshr_b32 s6, s0, 16
-; GFX6-NEXT:    s_lshr_b32 s7, s0, 24
-; GFX6-NEXT:    s_lshr_b32 s8, s1, 8
-; GFX6-NEXT:    s_and_b32 s9, s0, 0xff
-; GFX6-NEXT:    s_bfe_u32 s0, s0, 0x80008
+; GFX6-NEXT:    s_and_b32 s8, s0, 0xff
+; GFX6-NEXT:    s_lshl_b32 s9, s9, 8
+; GFX6-NEXT:    v_alignbit_b32 v0, s1, v0, 24
+; GFX6-NEXT:    s_and_b32 s0, s7, 0xff
+; GFX6-NEXT:    s_lshr_b32 s1, s2, 16
+; GFX6-NEXT:    s_lshr_b32 s7, s3, 8
+; GFX6-NEXT:    s_bfe_u32 s10, s2, 0x80008
+; GFX6-NEXT:    v_mul_lo_u32 v3, v2, v3
+; GFX6-NEXT:    s_or_b32 s8, s8, s9
+; GFX6-NEXT:    s_and_b32 s9, s2, 0xff
+; GFX6-NEXT:    s_lshl_b32 s10, s10, 8
 ; GFX6-NEXT:    s_and_b32 s1, s1, 0xff
-; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
-; GFX6-NEXT:    s_lshl_b32 s0, s0, 8
-; GFX6-NEXT:    s_lshl_b32 s1, s1, 8
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT:    s_or_b32 s0, s9, s0
-; GFX6-NEXT:    s_or_b32 s1, s7, s1
-; GFX6-NEXT:    s_and_b32 s7, s8, 0xff
-; GFX6-NEXT:    s_lshr_b32 s8, s2, 16
-; GFX6-NEXT:    s_lshr_b32 s9, s2, 24
-; GFX6-NEXT:    s_and_b32 s11, s2, 0xff
-; GFX6-NEXT:    s_bfe_u32 s2, s2, 0x80008
-; GFX6-NEXT:    s_lshl_b32 s2, s2, 8
-; GFX6-NEXT:    s_and_b32 s8, s8, 0xff
-; GFX6-NEXT:    s_or_b32 s2, s11, s2
-; GFX6-NEXT:    s_and_b32 s8, 0xffff, s8
-; GFX6-NEXT:    v_mov_b32_e32 v1, 0xffffffe8
-; GFX6-NEXT:    s_lshr_b32 s10, s3, 8
+; GFX6-NEXT:    s_and_b32 s3, s3, 0xff
+; GFX6-NEXT:    v_mov_b32_e32 v1, s2
+; GFX6-NEXT:    s_and_b32 s2, s7, 0xff
+; GFX6-NEXT:    s_or_b32 s9, s9, s10
+; GFX6-NEXT:    s_and_b32 s1, 0xffff, s1
+; GFX6-NEXT:    v_alignbit_b32 v1, s3, v1, 24
+; GFX6-NEXT:    s_and_b32 s2, 0xffff, s2
+; GFX6-NEXT:    s_and_b32 s9, 0xffff, s9
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX6-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX6-NEXT:    s_or_b32 s1, s9, s1
+; GFX6-NEXT:    v_or_b32_e32 v1, s2, v1
+; GFX6-NEXT:    s_lshr_b32 s2, s4, 16
+; GFX6-NEXT:    s_bfe_u32 s9, s4, 0x80008
+; GFX6-NEXT:    v_mul_hi_u32 v3, v2, v3
+; GFX6-NEXT:    s_and_b32 s7, s4, 0xff
+; GFX6-NEXT:    s_lshl_b32 s9, s9, 8
+; GFX6-NEXT:    s_and_b32 s2, s2, 0xff
+; GFX6-NEXT:    s_or_b32 s7, s7, s9
 ; GFX6-NEXT:    s_and_b32 s2, 0xffff, s2
-; GFX6-NEXT:    s_lshl_b32 s8, s8, 16
+; GFX6-NEXT:    s_and_b32 s7, 0xffff, s7
+; GFX6-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX6-NEXT:    s_or_b32 s2, s7, s2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT:    v_mul_hi_u32 v3, s2, v2
+; GFX6-NEXT:    s_lshr_b32 s3, s5, 8
+; GFX6-NEXT:    s_and_b32 s5, s5, 0xff
+; GFX6-NEXT:    v_mov_b32_e32 v4, s4
 ; GFX6-NEXT:    s_and_b32 s3, s3, 0xff
-; GFX6-NEXT:    v_mul_lo_u32 v1, v0, v1
-; GFX6-NEXT:    s_or_b32 s2, s2, s8
-; GFX6-NEXT:    s_lshl_b32 s3, s3, 8
-; GFX6-NEXT:    s_and_b32 s8, s10, 0xff
-; GFX6-NEXT:    s_or_b32 s3, s9, s3
-; GFX6-NEXT:    s_and_b32 s8, 0xffff, s8
+; GFX6-NEXT:    v_alignbit_b32 v4, s5, v4, 24
 ; GFX6-NEXT:    s_and_b32 s3, 0xffff, s3
-; GFX6-NEXT:    s_lshl_b32 s8, s8, 16
-; GFX6-NEXT:    s_or_b32 s3, s3, s8
-; GFX6-NEXT:    s_lshr_b32 s8, s4, 16
-; GFX6-NEXT:    s_lshr_b32 s9, s4, 24
-; GFX6-NEXT:    s_and_b32 s11, s4, 0xff
-; GFX6-NEXT:    s_bfe_u32 s4, s4, 0x80008
-; GFX6-NEXT:    v_mul_hi_u32 v1, v0, v1
-; GFX6-NEXT:    s_lshl_b32 s4, s4, 8
-; GFX6-NEXT:    s_and_b32 s8, s8, 0xff
-; GFX6-NEXT:    s_or_b32 s4, s11, s4
-; GFX6-NEXT:    s_and_b32 s8, 0xffff, s8
-; GFX6-NEXT:    s_and_b32 s4, 0xffff, s4
-; GFX6-NEXT:    s_lshl_b32 s8, s8, 16
-; GFX6-NEXT:    s_or_b32 s4, s4, s8
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT:    v_mul_hi_u32 v1, s4, v0
-; GFX6-NEXT:    s_lshr_b32 s10, s5, 8
-; GFX6-NEXT:    s_and_b32 s5, s5, 0xff
-; GFX6-NEXT:    s_lshl_b32 s5, s5, 8
-; GFX6-NEXT:    v_mul_lo_u32 v1, v1, 24
-; GFX6-NEXT:    s_and_b32 s8, s10, 0xff
-; GFX6-NEXT:    s_or_b32 s5, s9, s5
-; GFX6-NEXT:    s_and_b32 s8, 0xffff, s8
-; GFX6-NEXT:    s_and_b32 s5, 0xffff, s5
-; GFX6-NEXT:    s_lshl_b32 s8, s8, 16
-; GFX6-NEXT:    s_or_b32 s5, s5, s8
-; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s4, v1
-; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, 24, v1
-; GFX6-NEXT:    v_mul_hi_u32 v0, s5, v0
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v1
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, 24, v1
+; GFX6-NEXT:    v_and_b32_e32 v4, 0xffff, v4
+; GFX6-NEXT:    v_mul_lo_u32 v3, v3, 24
+; GFX6-NEXT:    s_lshl_b32 s3, s3, 16
+; GFX6-NEXT:    v_or_b32_e32 v4, s3, v4
+; GFX6-NEXT:    v_mul_hi_u32 v2, v4, v2
+; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s2, v3
+; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, 24, v3
+; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v3
+; GFX6-NEXT:    v_mul_lo_u32 v2, v2, 24
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, 24, v3
+; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v3
+; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v4, v2
+; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, 23, v3
+; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, 24, v2
+; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v2
 ; GFX6-NEXT:    s_and_b32 s6, s6, 0xff
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v1
-; GFX6-NEXT:    v_mul_lo_u32 v0, v0, 24
-; GFX6-NEXT:    s_and_b32 s0, 0xffff, s0
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GFX6-NEXT:    s_and_b32 s8, 0xffff, s8
 ; GFX6-NEXT:    s_and_b32 s6, 0xffff, s6
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, 23, v1
-; GFX6-NEXT:    s_lshl_b32 s4, s6, 17
-; GFX6-NEXT:    s_lshl_b32 s0, s0, 1
-; GFX6-NEXT:    s_or_b32 s0, s4, s0
-; GFX6-NEXT:    v_and_b32_e32 v2, 0xffffff, v2
-; GFX6-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
-; GFX6-NEXT:    v_lshl_b32_e32 v2, s0, v2
-; GFX6-NEXT:    v_lshr_b32_e32 v1, s2, v1
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s5, v0
-; GFX6-NEXT:    v_or_b32_e32 v1, v2, v1
-; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, 24, v0
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v0
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, 24, v0
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v0
-; GFX6-NEXT:    s_and_b32 s1, 0xffff, s1
-; GFX6-NEXT:    s_and_b32 s7, 0xffff, s7
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, 23, v0
-; GFX6-NEXT:    s_lshl_b32 s0, s7, 17
-; GFX6-NEXT:    s_lshl_b32 s1, s1, 1
-; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, 24, v2
+; GFX6-NEXT:    s_lshl_b32 s2, s6, 17
+; GFX6-NEXT:    s_lshl_b32 s3, s8, 1
+; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, 24, v2
+; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX6-NEXT:    s_and_b32 s0, 0xffff, s0
+; GFX6-NEXT:    s_or_b32 s2, s2, s3
+; GFX6-NEXT:    v_and_b32_e32 v5, 0xffffff, v5
+; GFX6-NEXT:    v_and_b32_e32 v3, 0xffffff, v3
+; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GFX6-NEXT:    v_lshl_b32_e32 v5, s2, v5
+; GFX6-NEXT:    v_lshr_b32_e32 v3, s1, v3
+; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, 23, v2
+; GFX6-NEXT:    s_lshl_b32 s0, s0, 17
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; GFX6-NEXT:    v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT:    v_or_b32_e32 v0, s0, v0
+; GFX6-NEXT:    v_and_b32_e32 v4, 0xffffff, v4
 ; GFX6-NEXT:    v_and_b32_e32 v2, 0xffffff, v2
-; GFX6-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
-; GFX6-NEXT:    v_lshl_b32_e32 v2, s0, v2
-; GFX6-NEXT:    v_lshr_b32_e32 v0, s3, v0
-; GFX6-NEXT:    v_bfe_u32 v3, v1, 8, 8
-; GFX6-NEXT:    v_or_b32_e32 v0, v2, v0
-; GFX6-NEXT:    v_and_b32_e32 v2, 0xff, v1
-; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
-; GFX6-NEXT:    v_bfe_u32 v1, v1, 16, 8
-; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
-; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, v4, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v1, v2, v1
+; GFX6-NEXT:    v_bfe_u32 v2, v3, 8, 8
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, 0xff, v3
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    v_bfe_u32 v2, v3, 16, 8
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
 ; GFX6-NEXT:    v_and_b32_e32 v2, 0xff, v0
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
 ; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
index 1821d29d4b050b..788692c94b0cfa 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
@@ -192,9 +192,7 @@ define i16 @v_uaddsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
 ; GFX6-NEXT:    v_min_u32_e32 v2, v3, v2
 ; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
 ; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 24, v1
-; GFX6-NEXT:    v_lshrrev_b32_e32 v0, 24, v0
-; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_alignbit_b32 v0, v1, v0, 24
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_uaddsat_v2i8:
@@ -290,9 +288,9 @@ define amdgpu_ps i16 @s_uaddsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
 ; GFX6-NEXT:    s_min_u32 s2, s3, s2
 ; GFX6-NEXT:    s_add_i32 s1, s1, s2
 ; GFX6-NEXT:    s_lshr_b32 s1, s1, 24
-; GFX6-NEXT:    s_lshr_b32 s0, s0, 24
-; GFX6-NEXT:    s_lshl_b32 s1, s1, 8
-; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s0
+; GFX6-NEXT:    v_alignbit_b32 v0, s1, v0, 24
+; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX6-NEXT:    ; return to shader part epilog
 ;
 ; GFX8-LABEL: s_uaddsat_v2i8:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
index a60370cd460f9e..0042d34e235d17 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
@@ -186,9 +186,7 @@ define i16 @v_usubsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
 ; GFX6-NEXT:    v_min_u32_e32 v2, v1, v2
 ; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, v1, v2
 ; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 24, v1
-; GFX6-NEXT:    v_lshrrev_b32_e32 v0, 24, v0
-; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_alignbit_b32 v0, v1, v0, 24
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_usubsat_v2i8:
@@ -282,9 +280,9 @@ define amdgpu_ps i16 @s_usubsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
 ; GFX6-NEXT:    s_min_u32 s2, s1, s2
 ; GFX6-NEXT:    s_sub_i32 s1, s1, s2
 ; GFX6-NEXT:    s_lshr_b32 s1, s1, 24
-; GFX6-NEXT:    s_lshr_b32 s0, s0, 24
-; GFX6-NEXT:    s_lshl_b32 s1, s1, 8
-; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s0
+; GFX6-NEXT:    v_alignbit_b32 v0, s1, v0, 24
+; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX6-NEXT:    ; return to shader part epilog
 ;
 ; GFX8-LABEL: s_usubsat_v2i8:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/xnor.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/xnor.ll
index cec73b7c3617b7..6bb4e2d3dbe26e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/xnor.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/xnor.ll
@@ -26,13 +26,10 @@ entry:
 define amdgpu_ps i32 @scalar_xnor_v2i16_one_use(<2 x i16> inreg %a, <2 x i16> inreg %b) {
 ; GFX7-LABEL: scalar_xnor_v2i16_one_use:
 ; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
 ; GFX7-NEXT:    s_lshl_b32 s1, s1, 16
 ; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff
 ; GFX7-NEXT:    s_or_b32 s0, s1, s0
-; GFX7-NEXT:    s_lshl_b32 s1, s3, 16
-; GFX7-NEXT:    s_and_b32 s2, s2, 0xffff
-; GFX7-NEXT:    s_or_b32 s1, s1, s2
-; GFX7-NEXT:    s_xor_b32 s0, s0, s1
 ; GFX7-NEXT:    s_xor_b32 s0, s0, -1
 ; GFX7-NEXT:    ; return to shader part epilog
 ;
@@ -117,22 +114,17 @@ define amdgpu_ps i64 @scalar_xnor_i64_one_use(i64 inreg %a, i64 inreg %b) {
 define amdgpu_ps i64 @scalar_xnor_v4i16_one_use(<4 x i16> inreg %a, <4 x i16> inreg %b) {
 ; GFX7-LABEL: scalar_xnor_v4i16_one_use:
 ; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX7-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
 ; GFX7-NEXT:    s_lshl_b32 s1, s1, 16
 ; GFX7-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX7-NEXT:    s_mov_b32 s8, -1
 ; GFX7-NEXT:    s_or_b32 s0, s1, s0
 ; GFX7-NEXT:    s_lshl_b32 s1, s3, 16
 ; GFX7-NEXT:    s_and_b32 s2, s2, 0xffff
+; GFX7-NEXT:    s_mov_b32 s9, s8
 ; GFX7-NEXT:    s_or_b32 s1, s1, s2
-; GFX7-NEXT:    s_lshl_b32 s2, s5, 16
-; GFX7-NEXT:    s_and_b32 s3, s4, 0xffff
-; GFX7-NEXT:    s_or_b32 s2, s2, s3
-; GFX7-NEXT:    s_lshl_b32 s3, s7, 16
-; GFX7-NEXT:    s_and_b32 s4, s6, 0xffff
-; GFX7-NEXT:    s_or_b32 s3, s3, s4
-; GFX7-NEXT:    s_mov_b32 s4, -1
-; GFX7-NEXT:    s_mov_b32 s5, s4
-; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
-; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX7-NEXT:    s_xor_b64 s[0:1], s[0:1], s[8:9]
 ; GFX7-NEXT:    ; return to shader part epilog
 ;
 ; GFX8-LABEL: scalar_xnor_v4i16_one_use:


