[llvm] 3612d9e - [GISel] Rework the trunc/shl combine into a generic trunc/shift combine

Pierre van Houtryve via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 9 01:46:50 PST 2022


Author: Pierre van Houtryve
Date: 2022-12-09T04:46:45-05:00
New Revision: 3612d9eaacd0571692f6ed80bd67403bc1ca541e

URL: https://github.com/llvm/llvm-project/commit/3612d9eaacd0571692f6ed80bd67403bc1ca541e
DIFF: https://github.com/llvm/llvm-project/commit/3612d9eaacd0571692f6ed80bd67403bc1ca541e.diff

LOG: [GISel] Rework the trunc/shl combine into a generic trunc/shift combine

This combine previously handled only left shifts; it can now handle right shifts as well. Right shifts are handled conservatively: they are only truncated to the size returned by TLI.

AMDGPU, for instance, benefits from always lowering shifts to 32 bits, whereas AArch64 would rather keep them at 64 bits.
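
To illustrate the new right-shift handling, here is a rough before/after sketch in MIR (based on the added combine-trunc-shift.mir tests below; register names are invented for the example, not taken from the patch). A 64-bit right shift feeding a 16-bit truncate is narrowed to a 32-bit shift, provided the shift amount is known to be at most 32 - 16 = 16 so no demanded bits are lost:

    ; before
    %shift:_(s64) = G_LSHR %x(s64), %amt(s32)
    %res:_(s16) = G_TRUNC %shift(s64)

    ; after: the shift is performed in the 32-bit mid type
    %narrow:_(s32) = G_TRUNC %x(s64)
    %nshift:_(s32) = G_LSHR %narrow, %amt(s32)
    %res:_(s16) = G_TRUNC %nshift(s32)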

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D136319

Added: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-trunc-shift.mir

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
    llvm/include/llvm/Target/GlobalISel/Combine.td
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp

Removed: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-trunc-shl.mir


################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 0f7bd93aef714..93ad4ceca1370 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -406,12 +406,17 @@ class CombinerHelper {
   void applyCombineTruncOfExt(MachineInstr &MI,
                               std::pair<Register, unsigned> &MatchInfo);
 
-  /// Transform trunc (shl x, K) to shl (trunc x),
-  /// K => K < VT.getScalarSizeInBits().
-  bool matchCombineTruncOfShl(MachineInstr &MI,
-                              std::pair<Register, Register> &MatchInfo);
-  void applyCombineTruncOfShl(MachineInstr &MI,
-                              std::pair<Register, Register> &MatchInfo);
+  /// Transform trunc (shl x, K) to shl (trunc x), K
+  ///    if K < VT.getScalarSizeInBits().
+  ///
+  /// Transforms trunc ([al]shr x, K) to (trunc ([al]shr (MidVT (trunc x)), K))
+  ///    if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits())
+  /// MidVT is obtained by finding a legal type between the trunc's src and dst
+  /// types.
+  bool matchCombineTruncOfShift(MachineInstr &MI,
+                                std::pair<MachineInstr *, LLT> &MatchInfo);
+  void applyCombineTruncOfShift(MachineInstr &MI,
+                                std::pair<MachineInstr *, LLT> &MatchInfo);
 
   /// Transform G_MUL(x, -1) to G_SUB(0, x)
   void applyCombineMulByNegativeOne(MachineInstr &MI);

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index f65b1ed3bc876..544f8c2555e82 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -642,13 +642,15 @@ def trunc_ext_fold: GICombineRule <
   (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
 >;
 
-// Fold trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits().
-def trunc_shl_matchinfo : GIDefMatchData<"std::pair<Register, Register>">;
-def trunc_shl: GICombineRule <
-  (defs root:$root, trunc_shl_matchinfo:$matchinfo),
+// Under certain conditions, transform:
+//  trunc (shl x, K)     -> shl (trunc x), K
+//  trunc ([al]shr x, K) -> (trunc ([al]shr (trunc x), K))
+def trunc_shift_matchinfo : GIDefMatchData<"std::pair<MachineInstr*, LLT>">;
+def trunc_shift: GICombineRule <
+  (defs root:$root, trunc_shift_matchinfo:$matchinfo),
   (match (wip_match_opcode G_TRUNC):$root,
-         [{ return Helper.matchCombineTruncOfShl(*${root}, ${matchinfo}); }]),
-  (apply [{ Helper.applyCombineTruncOfShl(*${root}, ${matchinfo}); }])
+         [{ return Helper.matchCombineTruncOfShift(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineTruncOfShift(*${root}, ${matchinfo}); }])
 >;
 
 // Transform (mul x, -1) -> (sub 0, x)
@@ -1076,7 +1078,7 @@ def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
     known_bits_simplifications, ext_ext_fold,
     not_cmp_fold, opt_brcond_by_inverting_cond,
     unmerge_merge, unmerge_cst, unmerge_dead_to_trunc,
-    unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shl,
+    unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shift,
     const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
     shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
     truncstore_merge, div_rem_to_divrem, funnel_shift_combines,

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 255e9a2255d90..27f4f4fd8fce4 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2266,44 +2266,109 @@ void CombinerHelper::applyCombineTruncOfExt(
   MI.eraseFromParent();
 }
 
-bool CombinerHelper::matchCombineTruncOfShl(
-    MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
-  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
-  Register DstReg = MI.getOperand(0).getReg();
-  Register SrcReg = MI.getOperand(1).getReg();
-  LLT DstTy = MRI.getType(DstReg);
-  Register ShiftSrc;
-  Register ShiftAmt;
-
-  if (MRI.hasOneNonDBGUse(SrcReg) &&
-      mi_match(SrcReg, MRI, m_GShl(m_Reg(ShiftSrc), m_Reg(ShiftAmt))) &&
-      isLegalOrBeforeLegalizer(
-          {TargetOpcode::G_SHL,
-           {DstTy, getTargetLowering().getPreferredShiftAmountTy(DstTy)}})) {
-    KnownBits Known = KB->getKnownBits(ShiftAmt);
-    unsigned Size = DstTy.getSizeInBits();
-    if (Known.countMaxActiveBits() <= Log2_32(Size)) {
-      MatchInfo = std::make_pair(ShiftSrc, ShiftAmt);
-      return true;
-    }
-  }
-  return false;
+static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy) {
+  const unsigned ShiftSize = ShiftTy.getScalarSizeInBits();
+  const unsigned TruncSize = TruncTy.getScalarSizeInBits();
+
+  // ShiftTy > 32 > TruncTy -> 32
+  if (ShiftSize > 32 && TruncSize < 32)
+    return ShiftTy.changeElementSize(32);
+
+  // TODO: We could also reduce to 16 bits, but that's more target-dependent.
+  //  Some targets like it, some don't, some only like it under certain
+  //  conditions/processor versions, etc.
+  //  A TL hook might be needed for this.
+
+  // Don't combine
+  return ShiftTy;
 }
 
-void CombinerHelper::applyCombineTruncOfShl(
-    MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
+bool CombinerHelper::matchCombineTruncOfShift(
+    MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
   Register DstReg = MI.getOperand(0).getReg();
   Register SrcReg = MI.getOperand(1).getReg();
+
+  if (!MRI.hasOneNonDBGUse(SrcReg))
+    return false;
+
+  LLT SrcTy = MRI.getType(SrcReg);
   LLT DstTy = MRI.getType(DstReg);
-  MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
 
-  Register ShiftSrc = MatchInfo.first;
-  Register ShiftAmt = MatchInfo.second;
+  MachineInstr *SrcMI = getDefIgnoringCopies(SrcReg, MRI);
+  const auto &TL = getTargetLowering();
+
+  LLT NewShiftTy;
+  switch (SrcMI->getOpcode()) {
+  default:
+    return false;
+  case TargetOpcode::G_SHL: {
+    NewShiftTy = DstTy;
+
+    // Make sure new shift amount is legal.
+    KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
+    if (Known.getMaxValue().uge(NewShiftTy.getScalarSizeInBits()))
+      return false;
+    break;
+  }
+  case TargetOpcode::G_LSHR:
+  case TargetOpcode::G_ASHR: {
+    // For right shifts, we conservatively do not do the transform if the TRUNC
+    // has any STORE users. The reason is that if we change the type of the
+    // shift, we may break the truncstore combine.
+    //
+    // TODO: Fix truncstore combine to handle (trunc(lshr (trunc x), k)).
+    for (auto &User : MRI.use_instructions(DstReg))
+      if (User.getOpcode() == TargetOpcode::G_STORE)
+        return false;
+
+    NewShiftTy = getMidVTForTruncRightShiftCombine(SrcTy, DstTy);
+    if (NewShiftTy == SrcTy)
+      return false;
+
+    // Make sure we won't lose information by truncating the high bits.
+    KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
+    if (Known.getMaxValue().ugt(NewShiftTy.getScalarSizeInBits() -
+                                DstTy.getScalarSizeInBits()))
+      return false;
+    break;
+  }
+  }
+
+  if (!isLegalOrBeforeLegalizer(
+          {SrcMI->getOpcode(),
+           {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))
+    return false;
+
+  MatchInfo = std::make_pair(SrcMI, NewShiftTy);
+  return true;
+}
+
+void CombinerHelper::applyCombineTruncOfShift(
+    MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
   Builder.setInstrAndDebugLoc(MI);
-  auto TruncShiftSrc = Builder.buildTrunc(DstTy, ShiftSrc);
-  Builder.buildShl(DstReg, TruncShiftSrc, ShiftAmt, SrcMI->getFlags());
-  MI.eraseFromParent();
+
+  MachineInstr *ShiftMI = MatchInfo.first;
+  LLT NewShiftTy = MatchInfo.second;
+
+  Register Dst = MI.getOperand(0).getReg();
+  LLT DstTy = MRI.getType(Dst);
+
+  Register ShiftAmt = ShiftMI->getOperand(2).getReg();
+  Register ShiftSrc = ShiftMI->getOperand(1).getReg();
+  ShiftSrc = Builder.buildTrunc(NewShiftTy, ShiftSrc).getReg(0);
+
+  Register NewShift =
+      Builder
+          .buildInstr(ShiftMI->getOpcode(), {NewShiftTy}, {ShiftSrc, ShiftAmt})
+          .getReg(0);
+
+  if (NewShiftTy == DstTy)
+    replaceRegWith(MRI, Dst, NewShift);
+  else
+    Builder.buildTrunc(Dst, NewShift);
+
+  eraseInst(MI);
 }
 
 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-trunc-shift.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-trunc-shift.mir
new file mode 100644
index 0000000000000..187a7786a51fd
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-trunc-shift.mir
@@ -0,0 +1,209 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: trunc_s32_shl_s64_5
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: trunc_s32_shl_s64_5
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
+    ; CHECK-NEXT: $vgpr0 = COPY [[SHL]](s32)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_CONSTANT i32 1
+    %2:_(s64) = G_SHL %0:_, %1
+    %3:_(s32) = G_TRUNC %2
+    $vgpr0 = COPY %3
+...
+
+---
+name: trunc_s16_shl_s32_5
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: trunc_s16_shl_s32_5
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
+    ; CHECK-NEXT: S_ENDPGM 0, implicit [[TRUNC]](s16)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_CONSTANT i32 1
+    %2:_(s32) = G_SHL %0:_, %1
+    %3:_(s16) = G_TRUNC %2
+    S_ENDPGM 0, implicit %3
+
+...
+
+---
+name: trunc_s16_shl_s64_5
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: trunc_s16_shl_s64_5
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s64)
+    ; CHECK-NEXT: S_ENDPGM 0, implicit [[TRUNC]](s16)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_CONSTANT i32 1
+    %2:_(s64) = G_SHL %0:_, %1
+    %3:_(s16) = G_TRUNC %2
+    S_ENDPGM 0, implicit %3
+
+...
+
+---
+name:            s16_trunc_s64_lshr_16
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: s16_trunc_s64_lshr_16
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK-NEXT: %amt:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], %amt(s32)
+    ; CHECK-NEXT: %trunc:_(s16) = G_TRUNC [[LSHR]](s32)
+    ; CHECK-NEXT: %foo:_(s16) = G_CONSTANT i16 55
+    ; CHECK-NEXT: %keep:_(s32) = G_MERGE_VALUES %trunc(s16), %foo(s16)
+    ; CHECK-NEXT: $vgpr0 = COPY %keep(s32)
+    %0:_(s32) = COPY $vgpr0
+    %src:_(s64) = G_ZEXT %0
+    %amt:_(s32) = G_CONSTANT i32 16
+    %shift:_(s64) = G_LSHR %src, %amt
+    %trunc:_(s16) = G_TRUNC %shift
+    %foo:_(s16) = G_CONSTANT i16 55
+    %keep:_(s32) = G_MERGE_VALUES %trunc, %foo
+    $vgpr0 = COPY %keep
+...
+
+---
+name:            s16_trunc_s64_ashr_16
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: s16_trunc_s64_ashr_16
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK-NEXT: %amt:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], %amt(s32)
+    ; CHECK-NEXT: %trunc:_(s16) = G_TRUNC [[ASHR]](s32)
+    ; CHECK-NEXT: %foo:_(s16) = G_CONSTANT i16 55
+    ; CHECK-NEXT: %keep:_(s32) = G_MERGE_VALUES %trunc(s16), %foo(s16)
+    ; CHECK-NEXT: $vgpr0 = COPY %keep(s32)
+    %0:_(s32) = COPY $vgpr0
+    %src:_(s64) = G_ZEXT %0
+    %amt:_(s32) = G_CONSTANT i32 16
+    %shift:_(s64) = G_ASHR %src, %amt
+    %trunc:_(s16) = G_TRUNC %shift
+    %foo:_(s16) = G_CONSTANT i16 55
+    %keep:_(s32) = G_MERGE_VALUES %trunc, %foo
+    $vgpr0 = COPY %keep
+...
+
+---
+name:            s16_trunc_s64_lshr_17_nofold
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: s16_trunc_s64_lshr_17_nofold
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK-NEXT: %src:_(s64) = G_ZEXT [[COPY]](s32)
+    ; CHECK-NEXT: %amt:_(s32) = G_CONSTANT i32 17
+    ; CHECK-NEXT: %shift:_(s64) = G_LSHR %src, %amt(s32)
+    ; CHECK-NEXT: %trunc:_(s16) = G_TRUNC %shift(s64)
+    ; CHECK-NEXT: %foo:_(s16) = G_CONSTANT i16 55
+    ; CHECK-NEXT: %keep:_(s32) = G_MERGE_VALUES %trunc(s16), %foo(s16)
+    ; CHECK-NEXT: $vgpr0 = COPY %keep(s32)
+    %0:_(s32) = COPY $vgpr0
+    %src:_(s64) = G_ZEXT %0
+    %amt:_(s32) = G_CONSTANT i32 17
+    %shift:_(s64) = G_LSHR %src, %amt
+    %trunc:_(s16) = G_TRUNC %shift
+    %foo:_(s16) = G_CONSTANT i16 55
+    %keep:_(s32) = G_MERGE_VALUES %trunc, %foo
+    $vgpr0 = COPY %keep
+...
+
+---
+name:            s26_trunc_s64_lshr_6
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: s26_trunc_s64_lshr_6
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK-NEXT: %amt:_(s32) = G_CONSTANT i32 6
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], %amt(s32)
+    ; CHECK-NEXT: %trunc:_(s26) = G_TRUNC [[LSHR]](s32)
+    ; CHECK-NEXT: %foo:_(s26) = G_CONSTANT i26 55
+    ; CHECK-NEXT: %keep0:_(s26) = G_ADD %trunc, %foo
+    ; CHECK-NEXT: %keep1:_(s32) = G_ANYEXT %keep0(s26)
+    ; CHECK-NEXT: $vgpr0 = COPY %keep1(s32)
+    %0:_(s32) = COPY $vgpr0
+    %src:_(s64) = G_ZEXT %0
+    %amt:_(s32) = G_CONSTANT i32 6
+    %shift:_(s64) = G_LSHR %src, %amt
+    %trunc:_(s26) = G_TRUNC %shift
+    %foo:_(s26) = G_CONSTANT i26 55
+    %keep0:_(s26) = G_ADD %trunc, %foo
+    %keep1:_(s32) = G_ANYEXT %keep0
+    $vgpr0 = COPY %keep1
+...
+
+---
+name:            s26_trunc_s64_lshr_7_nofold
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0
+    ; CHECK-LABEL: name: s26_trunc_s64_lshr_7_nofold
+    ; CHECK: liveins: $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK-NEXT: %src:_(s64) = G_ZEXT [[COPY]](s32)
+    ; CHECK-NEXT: %amt:_(s32) = G_CONSTANT i32 7
+    ; CHECK-NEXT: %shift:_(s64) = G_LSHR %src, %amt(s32)
+    ; CHECK-NEXT: %trunc:_(s26) = G_TRUNC %shift(s64)
+    ; CHECK-NEXT: %foo:_(s26) = G_CONSTANT i26 55
+    ; CHECK-NEXT: %keep0:_(s26) = G_ADD %trunc, %foo
+    ; CHECK-NEXT: %keep1:_(s32) = G_ANYEXT %keep0(s26)
+    ; CHECK-NEXT: $vgpr0 = COPY %keep1(s32)
+    %0:_(s32) = COPY $vgpr0
+    %src:_(s64) = G_ZEXT %0
+    %amt:_(s32) = G_CONSTANT i32 7
+    %shift:_(s64) = G_LSHR %src, %amt
+    %trunc:_(s26) = G_TRUNC %shift
+    %foo:_(s26) = G_CONSTANT i26 55
+    %keep0:_(s26) = G_ADD %trunc, %foo
+    %keep1:_(s32) = G_ANYEXT %keep0
+    $vgpr0 = COPY %keep1
+...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-trunc-shl.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-trunc-shl.mir
deleted file mode 100644
index fff36dfad533b..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-trunc-shl.mir
+++ /dev/null
@@ -1,73 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
-
----
-name: trunc_s32_shl_s64_5
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $vgpr0_vgpr1
-
-    ; CHECK-LABEL: name: trunc_s32_shl_s64_5
-    ; CHECK: liveins: $vgpr0_vgpr1
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
-    ; CHECK-NEXT: $vgpr0 = COPY [[SHL]](s32)
-    %0:_(s64) = COPY $vgpr0_vgpr1
-    %1:_(s32) = G_CONSTANT i32 1
-    %2:_(s64) = G_SHL %0:_, %1
-    %3:_(s32) = G_TRUNC %2
-    $vgpr0 = COPY %3
-...
-
----
-name: trunc_s16_shl_s32_5
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $vgpr0
-
-    ; CHECK-LABEL: name: trunc_s16_shl_s32_5
-    ; CHECK: liveins: $vgpr0
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
-    ; CHECK-NEXT: S_ENDPGM 0, implicit [[TRUNC]](s16)
-    %0:_(s32) = COPY $vgpr0
-    %1:_(s32) = G_CONSTANT i32 1
-    %2:_(s32) = G_SHL %0:_, %1
-    %3:_(s16) = G_TRUNC %2
-    S_ENDPGM 0, implicit %3
-
-...
-
----
-name: trunc_s16_shl_s64_5
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $vgpr0_vgpr1
-
-    ; CHECK-LABEL: name: trunc_s16_shl_s64_5
-    ; CHECK: liveins: $vgpr0_vgpr1
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s32)
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s64)
-    ; CHECK-NEXT: S_ENDPGM 0, implicit [[TRUNC]](s16)
-    %0:_(s64) = COPY $vgpr0_vgpr1
-    %1:_(s32) = G_CONSTANT i32 1
-    %2:_(s64) = G_SHL %0:_, %1
-    %3:_(s16) = G_TRUNC %2
-    S_ENDPGM 0, implicit %3
-
-...

