[llvm] [GISel] Combine shift + trunc + shift pattern (PR #155583)

via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 27 05:14:54 PDT 2025


https://github.com/jyli0116 updated https://github.com/llvm/llvm-project/pull/155583

From 8705209988dfd8eeb537e22baa520f7453edafe7 Mon Sep 17 00:00:00 2001
From: Yu Li <yu.li at arm.com>
Date: Wed, 27 Aug 2025 09:19:23 +0000
Subject: [PATCH 1/3] [GISel] Combine shift + trunc + shift pattern
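
The combine folds (shift (trunc (shift x, C1)), C2) into
trunc (shift x, (C1 + C2)) for G_LSHR and G_ASHR. For illustration, a
minimal IR sketch of the pattern this targets (the function name and
constants are made up for the example, but mirror the divide-by-constant
tests updated below):

  define i32 @lshr_trunc_lshr(i64 %x) {
    %wide = lshr i64 %x, 32        ; inner shift: C1 = 32 = 64 - 32
    %narrow = trunc i64 %wide to i32
    %res = lshr i32 %narrow, 5     ; outer shift: C2 = 5, C1 + C2 = 37 < 64
    ret i32 %res
  }

With the combine, GlobalISel emits the equivalent of
trunc (lshr i64 %x, 37) instead, which is why the AArch64 checks below
collapse "lsr x8, x8, #32" + "lsr w8, w8, #5" into a single
"lsr x8, x8, #37".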

---
 .../llvm/CodeGen/GlobalISel/CombinerHelper.h  |  13 ++
 .../include/llvm/Target/GlobalISel/Combine.td |  11 +-
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp |  54 ++++++
 llvm/lib/Target/AArch64/AArch64Combine.td     |   4 +-
 llvm/test/CodeGen/AArch64/combine-sdiv.ll     |  26 +--
 llvm/test/CodeGen/AArch64/rem-by-const.ll     | 141 ++++++--------
 llvm/test/CodeGen/AArch64/urem-lkk.ll         |  29 +--
 llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll   | 176 +++++++++---------
 llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll   | 158 ++++++++--------
 llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll   |   8 +-
 .../AMDGPU/GlobalISel/store-local.128.ll      | 105 ++++++-----
 .../AMDGPU/GlobalISel/store-local.96.ll       |  96 +++++-----
 llvm/test/CodeGen/AMDGPU/ds-alignment.ll      |  42 ++---
 13 files changed, 449 insertions(+), 414 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 6dba689e8af71..40f612cc98bcc 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -81,6 +81,13 @@ struct ShiftOfShiftedLogic {
   uint64_t ValSum;
 };
 
+struct ShiftOfTruncOfShift {
+  Register Src;
+  uint64_t ShiftAmt;
+  LLT ShiftAmtTy;
+  LLT InnerShiftTy;
+};
+
 using BuildFnTy = std::function<void(MachineIRBuilder &)>;
 
 using OperandBuildSteps =
@@ -338,6 +345,12 @@ class CombinerHelper {
 
   bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const;
 
+  /// Fold (shift (trunc (shift x, C1)), C2) -> trunc (shift x, (C1 + C2))
+  bool matchShiftOfTruncOfShift(MachineInstr &MI,
+                                ShiftOfTruncOfShift &MatchInfo) const;
+  void applyShiftOfTruncOfShift(MachineInstr &MI,
+                                ShiftOfTruncOfShift &MatchInfo) const;
+
   /// Transform a multiply by a power-of-2 value to a left shift.
   bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
   void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 9564b581c5ebb..46e41a5cc4c79 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -396,6 +396,14 @@ def commute_shift : GICombineRule<
          [{ return Helper.matchCommuteShift(*${d}, ${matchinfo}); }]),
   (apply [{ Helper.applyBuildFn(*${d}, ${matchinfo}); }])>;
 
+// Fold (shift (trunc (shift x, C1)), C2) -> trunc (shift x, (C1 + C2))
+def shift_of_trunc_of_shift_matchdata : GIDefMatchData<"ShiftOfTruncOfShift">;
+def shift_of_trunc_of_shift : GICombineRule<
+  (defs root:$root, shift_of_trunc_of_shift_matchdata:$matchinfo),
+  (match (wip_match_opcode G_LSHR, G_ASHR):$root,
+         [{ return Helper.matchShiftOfTruncOfShift(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyShiftOfTruncOfShift(*${root}, ${matchinfo}); }])>;
+
 def narrow_binop_feeding_and : GICombineRule<
   (defs root:$root, build_fn_matchinfo:$matchinfo),
   (match (wip_match_opcode G_AND):$root,
@@ -2103,7 +2111,8 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
     fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
     simplify_neg_minmax, combine_concat_vector,
     sext_trunc, zext_trunc, prefer_sign_combines, shuffle_combines,
-    combine_use_vector_truncate, merge_combines, overflow_combines, truncsat_combines]>;
+    combine_use_vector_truncate, merge_combines, overflow_combines, 
+    truncsat_combines, shift_of_trunc_of_shift]>;
 
 // A combine group used to for prelegalizer combiners at -O0. The combines in
 // this group have been selected based on experiments to balance code size and
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 0674f5fd1ae06..d3f0731955353 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2094,6 +2094,60 @@ bool CombinerHelper::matchCommuteShift(MachineInstr &MI,
   return true;
 }
 
+bool CombinerHelper::matchShiftOfTruncOfShift(
+    MachineInstr &MI, ShiftOfTruncOfShift &MatchInfo) const {
+  unsigned ShiftOpcode = MI.getOpcode();
+  assert(ShiftOpcode == TargetOpcode::G_LSHR ||
+         ShiftOpcode == TargetOpcode::G_ASHR);
+
+  Register N0 = MI.getOperand(1).getReg();
+  Register N1 = MI.getOperand(2).getReg();
+  unsigned OpSizeInBits = MRI.getType(N0).getScalarSizeInBits();
+
+  APInt N1C;
+  Register InnerShift;
+  if (!mi_match(N1, MRI, m_ICstOrSplat(N1C)) ||
+      !mi_match(N0, MRI, m_GTrunc(m_Reg(InnerShift))))
+    return false;
+
+  auto *InnerMI = MRI.getVRegDef(InnerShift);
+  if (InnerMI->getOpcode() != ShiftOpcode)
+    return false;
+
+  APInt N001C;
+  auto N001 = InnerMI->getOperand(2).getReg();
+  if (!mi_match(N001, MRI, m_ICstOrSplat(N001C)))
+    return false;
+
+  uint64_t c1 = N001C.getZExtValue();
+  uint64_t c2 = N1C.getZExtValue();
+  LLT InnerShiftTy = MRI.getType(InnerShift);
+  uint64_t InnerShiftSize = InnerShiftTy.getScalarSizeInBits();
+  if (!(c1 + OpSizeInBits == InnerShiftSize) || !(c1 + c2 < InnerShiftSize))
+    return false;
+
+  MatchInfo.Src = InnerMI->getOperand(1).getReg();
+  MatchInfo.ShiftAmt = c1 + c2;
+  MatchInfo.ShiftAmtTy = MRI.getType(N001);
+  MatchInfo.InnerShiftTy = InnerShiftTy;
+  return true;
+}
+
+void CombinerHelper::applyShiftOfTruncOfShift(
+    MachineInstr &MI, ShiftOfTruncOfShift &MatchInfo) const {
+  unsigned ShiftOpcode = MI.getOpcode();
+  assert(ShiftOpcode == TargetOpcode::G_LSHR ||
+         ShiftOpcode == TargetOpcode::G_ASHR);
+
+  Register Dst = MI.getOperand(0).getReg();
+  auto ShiftAmt =
+      Builder.buildConstant(MatchInfo.ShiftAmtTy, MatchInfo.ShiftAmt);
+  auto Shift = Builder.buildInstr(ShiftOpcode, {MatchInfo.InnerShiftTy},
+                                  {MatchInfo.Src, ShiftAmt});
+  Builder.buildTrunc(Dst, Shift);
+  MI.eraseFromParent();
+}
+
 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
                                           unsigned &ShiftVal) const {
   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
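
An aside, not part of the patch itself, on the two legality checks in
matchShiftOfTruncOfShift above: "c1 + OpSizeInBits == InnerShiftSize" means
the inner shift moves exactly the top OpSizeInBits bits down into the range
the trunc keeps, and "c1 + c2 < InnerShiftSize" keeps the merged shift amount
in range for the wide type. A minimal, self-contained C++ sanity check of
that equivalence for the s64 -> s32 G_LSHR case, with sample values chosen
arbitrarily:

  #include <cassert>
  #include <cstdint>

  int main() {
    const unsigned C1 = 32, C2 = 5;  // C1 + 32 == 64 and C1 + C2 == 37 < 64
    const uint64_t Samples[] = {0, 1, 0x851Fu, 0xDEADBEEFCAFEBABEull,
                                UINT64_MAX};
    for (uint64_t X : Samples) {
      uint32_t TwoShifts = uint32_t(X >> C1) >> C2;  // shift, trunc, shift
      uint32_t OneShift = uint32_t(X >> (C1 + C2));  // shift by C1+C2, trunc
      assert(TwoShifts == OneShift);
    }
    return 0;
  }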
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index 5f499e5e9700a..e44819ad5a4ae 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -349,6 +349,8 @@ def AArch64PostLegalizerLowering
 }
 
 // Post-legalization combines which are primarily optimizations.
+
+
 def AArch64PostLegalizerCombiner
     : GICombiner<"AArch64PostLegalizerCombinerImpl",
                        [copy_prop, cast_of_cast_combines, 
@@ -369,5 +371,5 @@ def AArch64PostLegalizerCombiner
                         commute_constant_to_rhs, extract_vec_elt_combines,
                         push_freeze_to_prevent_poison_from_propagating,
                         combine_mul_cmlt, combine_use_vector_truncate, 
-                        extmultomull, truncsat_combines]> {
+                        extmultomull, truncsat_combines, shift_of_trunc_of_shift]> {
 }
diff --git a/llvm/test/CodeGen/AArch64/combine-sdiv.ll b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
index 9d0ade2480428..014eaee5ebb2f 100644
--- a/llvm/test/CodeGen/AArch64/combine-sdiv.ll
+++ b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
@@ -1684,24 +1684,14 @@ define i32 @combine_i32_sdiv_const7(i32 %x) {
 }
 
 define i32 @combine_i32_sdiv_const100(i32 %x) {
-; CHECK-SD-LABEL: combine_i32_sdiv_const100:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    mov w8, #34079 // =0x851f
-; CHECK-SD-NEXT:    movk w8, #20971, lsl #16
-; CHECK-SD-NEXT:    smull x8, w0, w8
-; CHECK-SD-NEXT:    asr x8, x8, #37
-; CHECK-SD-NEXT:    add w0, w8, w8, lsr #31
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: combine_i32_sdiv_const100:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    mov w8, #34079 // =0x851f
-; CHECK-GI-NEXT:    movk w8, #20971, lsl #16
-; CHECK-GI-NEXT:    smull x8, w0, w8
-; CHECK-GI-NEXT:    asr x8, x8, #32
-; CHECK-GI-NEXT:    asr w8, w8, #5
-; CHECK-GI-NEXT:    add w0, w8, w8, lsr #31
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: combine_i32_sdiv_const100:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #34079 // =0x851f
+; CHECK-NEXT:    movk w8, #20971, lsl #16
+; CHECK-NEXT:    smull x8, w0, w8
+; CHECK-NEXT:    asr x8, x8, #37
+; CHECK-NEXT:    add w0, w8, w8, lsr #31
+; CHECK-NEXT:    ret
   %1 = sdiv i32 %x, 100
   ret i32 %1
 }
diff --git a/llvm/test/CodeGen/AArch64/rem-by-const.ll b/llvm/test/CodeGen/AArch64/rem-by-const.ll
index c57383ad9b1e7..f36a87794be35 100644
--- a/llvm/test/CodeGen/AArch64/rem-by-const.ll
+++ b/llvm/test/CodeGen/AArch64/rem-by-const.ll
@@ -276,28 +276,16 @@ entry:
 }
 
 define i32 @si32_100(i32 %a, i32 %b) {
-; CHECK-SD-LABEL: si32_100:
-; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    mov w8, #34079 // =0x851f
-; CHECK-SD-NEXT:    mov w9, #100 // =0x64
-; CHECK-SD-NEXT:    movk w8, #20971, lsl #16
-; CHECK-SD-NEXT:    smull x8, w0, w8
-; CHECK-SD-NEXT:    asr x8, x8, #37
-; CHECK-SD-NEXT:    add w8, w8, w8, lsr #31
-; CHECK-SD-NEXT:    msub w0, w8, w9, w0
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: si32_100:
-; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    mov w8, #34079 // =0x851f
-; CHECK-GI-NEXT:    mov w9, #100 // =0x64
-; CHECK-GI-NEXT:    movk w8, #20971, lsl #16
-; CHECK-GI-NEXT:    smull x8, w0, w8
-; CHECK-GI-NEXT:    asr x8, x8, #32
-; CHECK-GI-NEXT:    asr w8, w8, #5
-; CHECK-GI-NEXT:    add w8, w8, w8, lsr #31
-; CHECK-GI-NEXT:    msub w0, w8, w9, w0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: si32_100:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, #34079 // =0x851f
+; CHECK-NEXT:    mov w9, #100 // =0x64
+; CHECK-NEXT:    movk w8, #20971, lsl #16
+; CHECK-NEXT:    smull x8, w0, w8
+; CHECK-NEXT:    asr x8, x8, #37
+; CHECK-NEXT:    add w8, w8, w8, lsr #31
+; CHECK-NEXT:    msub w0, w8, w9, w0
+; CHECK-NEXT:    ret
 entry:
   %s = srem i32 %a, 100
   ret i32 %s
@@ -336,26 +324,15 @@ entry:
 }
 
 define i32 @ui32_100(i32 %a, i32 %b) {
-; CHECK-SD-LABEL: ui32_100:
-; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    mov w8, #34079 // =0x851f
-; CHECK-SD-NEXT:    mov w9, #100 // =0x64
-; CHECK-SD-NEXT:    movk w8, #20971, lsl #16
-; CHECK-SD-NEXT:    umull x8, w0, w8
-; CHECK-SD-NEXT:    lsr x8, x8, #37
-; CHECK-SD-NEXT:    msub w0, w8, w9, w0
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: ui32_100:
-; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    mov w8, #34079 // =0x851f
-; CHECK-GI-NEXT:    mov w9, #100 // =0x64
-; CHECK-GI-NEXT:    movk w8, #20971, lsl #16
-; CHECK-GI-NEXT:    umull x8, w0, w8
-; CHECK-GI-NEXT:    lsr x8, x8, #32
-; CHECK-GI-NEXT:    lsr w8, w8, #5
-; CHECK-GI-NEXT:    msub w0, w8, w9, w0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: ui32_100:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, #34079 // =0x851f
+; CHECK-NEXT:    mov w9, #100 // =0x64
+; CHECK-NEXT:    movk w8, #20971, lsl #16
+; CHECK-NEXT:    umull x8, w0, w8
+; CHECK-NEXT:    lsr x8, x8, #37
+; CHECK-NEXT:    msub w0, w8, w9, w0
+; CHECK-NEXT:    ret
 entry:
   %s = urem i32 %a, 100
   ret i32 %s
@@ -1118,13 +1095,12 @@ define <8 x i8> @sv8i8_100(<8 x i8> %d, <8 x i8> %e) {
 ; CHECK-GI-LABEL: sv8i8_100:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    movi v1.8b, #41
-; CHECK-GI-NEXT:    movi v3.8b, #100
+; CHECK-GI-NEXT:    movi v2.8b, #100
 ; CHECK-GI-NEXT:    smull v1.8h, v0.8b, v1.8b
-; CHECK-GI-NEXT:    shrn v1.8b, v1.8h, #8
-; CHECK-GI-NEXT:    sshr v2.8b, v1.8b, #4
-; CHECK-GI-NEXT:    ushr v2.8b, v2.8b, #7
-; CHECK-GI-NEXT:    ssra v2.8b, v1.8b, #4
-; CHECK-GI-NEXT:    mls v0.8b, v2.8b, v3.8b
+; CHECK-GI-NEXT:    sshr v1.8h, v1.8h, #12
+; CHECK-GI-NEXT:    xtn v1.8b, v1.8h
+; CHECK-GI-NEXT:    usra v1.8b, v1.8b, #7
+; CHECK-GI-NEXT:    mls v0.8b, v1.8b, v2.8b
 ; CHECK-GI-NEXT:    ret
 entry:
   %s = srem <8 x i8> %d, <i8 100, i8 100, i8 100, i8 100, i8 100, i8 100, i8 100, i8 100>
@@ -1619,15 +1595,25 @@ entry:
 }
 
 define <8 x i8> @uv8i8_100(<8 x i8> %d, <8 x i8> %e) {
-; CHECK-LABEL: uv8i8_100:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v1.8b, #41
-; CHECK-NEXT:    movi v2.8b, #100
-; CHECK-NEXT:    umull v1.8h, v0.8b, v1.8b
-; CHECK-NEXT:    shrn v1.8b, v1.8h, #8
-; CHECK-NEXT:    ushr v1.8b, v1.8b, #4
-; CHECK-NEXT:    mls v0.8b, v1.8b, v2.8b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: uv8i8_100:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi v1.8b, #41
+; CHECK-SD-NEXT:    movi v2.8b, #100
+; CHECK-SD-NEXT:    umull v1.8h, v0.8b, v1.8b
+; CHECK-SD-NEXT:    shrn v1.8b, v1.8h, #8
+; CHECK-SD-NEXT:    ushr v1.8b, v1.8b, #4
+; CHECK-SD-NEXT:    mls v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uv8i8_100:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    movi v1.8b, #41
+; CHECK-GI-NEXT:    movi v2.8b, #100
+; CHECK-GI-NEXT:    umull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT:    ushr v1.8h, v1.8h, #12
+; CHECK-GI-NEXT:    xtn v1.8b, v1.8h
+; CHECK-GI-NEXT:    mls v0.8b, v1.8b, v2.8b
+; CHECK-GI-NEXT:    ret
 entry:
   %s = urem <8 x i8> %d, <i8 100, i8 100, i8 100, i8 100, i8 100, i8 100, i8 100, i8 100>
   ret <8 x i8> %s
@@ -1904,14 +1890,13 @@ define <4 x i16> @sv4i16_7(<4 x i16> %d, <4 x i16> %e) {
 ; CHECK-GI-LABEL: sv4i16_7:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    adrp x8, .LCPI44_0
-; CHECK-GI-NEXT:    movi v3.4h, #7
+; CHECK-GI-NEXT:    movi v2.4h, #7
 ; CHECK-GI-NEXT:    ldr d1, [x8, :lo12:.LCPI44_0]
 ; CHECK-GI-NEXT:    smull v1.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT:    shrn v1.4h, v1.4s, #16
-; CHECK-GI-NEXT:    sshr v2.4h, v1.4h, #1
-; CHECK-GI-NEXT:    ushr v2.4h, v2.4h, #15
-; CHECK-GI-NEXT:    ssra v2.4h, v1.4h, #1
-; CHECK-GI-NEXT:    mls v0.4h, v2.4h, v3.4h
+; CHECK-GI-NEXT:    sshr v1.4s, v1.4s, #17
+; CHECK-GI-NEXT:    xtn v1.4h, v1.4s
+; CHECK-GI-NEXT:    usra v1.4h, v1.4h, #15
+; CHECK-GI-NEXT:    mls v0.4h, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    ret
 entry:
   %s = srem <4 x i16> %d, <i16 7, i16 7, i16 7, i16 7>
@@ -1934,14 +1919,13 @@ define <4 x i16> @sv4i16_100(<4 x i16> %d, <4 x i16> %e) {
 ; CHECK-GI-LABEL: sv4i16_100:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    adrp x8, .LCPI45_0
-; CHECK-GI-NEXT:    movi v3.4h, #100
+; CHECK-GI-NEXT:    movi v2.4h, #100
 ; CHECK-GI-NEXT:    ldr d1, [x8, :lo12:.LCPI45_0]
 ; CHECK-GI-NEXT:    smull v1.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT:    shrn v1.4h, v1.4s, #16
-; CHECK-GI-NEXT:    sshr v2.4h, v1.4h, #3
-; CHECK-GI-NEXT:    ushr v2.4h, v2.4h, #15
-; CHECK-GI-NEXT:    ssra v2.4h, v1.4h, #3
-; CHECK-GI-NEXT:    mls v0.4h, v2.4h, v3.4h
+; CHECK-GI-NEXT:    sshr v1.4s, v1.4s, #19
+; CHECK-GI-NEXT:    xtn v1.4h, v1.4s
+; CHECK-GI-NEXT:    usra v1.4h, v1.4h, #15
+; CHECK-GI-NEXT:    mls v0.4h, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    ret
 entry:
   %s = srem <4 x i16> %d, <i16 100, i16 100, i16 100, i16 100>
@@ -2301,8 +2285,8 @@ define <4 x i16> @uv4i16_100(<4 x i16> %d, <4 x i16> %e) {
 ; CHECK-GI-NEXT:    ldr d2, [x8, :lo12:.LCPI53_0]
 ; CHECK-GI-NEXT:    umull v1.4s, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    movi v2.4h, #100
-; CHECK-GI-NEXT:    shrn v1.4h, v1.4s, #16
-; CHECK-GI-NEXT:    ushr v1.4h, v1.4h, #1
+; CHECK-GI-NEXT:    ushr v1.4s, v1.4s, #17
+; CHECK-GI-NEXT:    xtn v1.4h, v1.4s
 ; CHECK-GI-NEXT:    mls v0.4h, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    ret
 entry:
@@ -2424,14 +2408,13 @@ define <2 x i32> @sv2i32_100(<2 x i32> %d, <2 x i32> %e) {
 ; CHECK-GI-LABEL: sv2i32_100:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    adrp x8, .LCPI57_0
-; CHECK-GI-NEXT:    movi v3.2s, #100
+; CHECK-GI-NEXT:    movi v2.2s, #100
 ; CHECK-GI-NEXT:    ldr d1, [x8, :lo12:.LCPI57_0]
 ; CHECK-GI-NEXT:    smull v1.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT:    shrn v1.2s, v1.2d, #32
-; CHECK-GI-NEXT:    sshr v2.2s, v1.2s, #5
-; CHECK-GI-NEXT:    ushr v2.2s, v2.2s, #31
-; CHECK-GI-NEXT:    ssra v2.2s, v1.2s, #5
-; CHECK-GI-NEXT:    mls v0.2s, v2.2s, v3.2s
+; CHECK-GI-NEXT:    sshr v1.2d, v1.2d, #37
+; CHECK-GI-NEXT:    xtn v1.2s, v1.2d
+; CHECK-GI-NEXT:    usra v1.2s, v1.2s, #31
+; CHECK-GI-NEXT:    mls v0.2s, v1.2s, v2.2s
 ; CHECK-GI-NEXT:    ret
 entry:
   %s = srem <2 x i32> %d, <i32 100, i32 100>
@@ -2656,8 +2639,8 @@ define <2 x i32> @uv2i32_100(<2 x i32> %d, <2 x i32> %e) {
 ; CHECK-GI-NEXT:    movi v2.2s, #100
 ; CHECK-GI-NEXT:    ldr d1, [x8, :lo12:.LCPI63_0]
 ; CHECK-GI-NEXT:    umull v1.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT:    shrn v1.2s, v1.2d, #32
-; CHECK-GI-NEXT:    ushr v1.2s, v1.2s, #5
+; CHECK-GI-NEXT:    ushr v1.2d, v1.2d, #37
+; CHECK-GI-NEXT:    xtn v1.2s, v1.2d
 ; CHECK-GI-NEXT:    mls v0.2s, v1.2s, v2.2s
 ; CHECK-GI-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/urem-lkk.ll b/llvm/test/CodeGen/AArch64/urem-lkk.ll
index 0dd6685555826..40016c7e4ce0f 100644
--- a/llvm/test/CodeGen/AArch64/urem-lkk.ll
+++ b/llvm/test/CodeGen/AArch64/urem-lkk.ll
@@ -20,26 +20,15 @@ define i32 @fold_urem_positive_odd(i32 %x) {
 }
 
 define i32 @fold_urem_positive_even(i32 %x) {
-; CHECK-SD-LABEL: fold_urem_positive_even:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    mov w8, #16323 // =0x3fc3
-; CHECK-SD-NEXT:    mov w9, #1060 // =0x424
-; CHECK-SD-NEXT:    movk w8, #63310, lsl #16
-; CHECK-SD-NEXT:    umull x8, w0, w8
-; CHECK-SD-NEXT:    lsr x8, x8, #42
-; CHECK-SD-NEXT:    msub w0, w8, w9, w0
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: fold_urem_positive_even:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    mov w8, #16323 // =0x3fc3
-; CHECK-GI-NEXT:    mov w9, #1060 // =0x424
-; CHECK-GI-NEXT:    movk w8, #63310, lsl #16
-; CHECK-GI-NEXT:    umull x8, w0, w8
-; CHECK-GI-NEXT:    lsr x8, x8, #32
-; CHECK-GI-NEXT:    lsr w8, w8, #10
-; CHECK-GI-NEXT:    msub w0, w8, w9, w0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: fold_urem_positive_even:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #16323 // =0x3fc3
+; CHECK-NEXT:    mov w9, #1060 // =0x424
+; CHECK-NEXT:    movk w8, #63310, lsl #16
+; CHECK-NEXT:    umull x8, w0, w8
+; CHECK-NEXT:    lsr x8, x8, #42
+; CHECK-NEXT:    msub w0, w8, w9, w0
+; CHECK-NEXT:    ret
   %1 = urem i32 %x, 1060
   ret i32 %1
 }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
index fc81e16d68e98..7869a81cb9705 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
@@ -3686,22 +3686,21 @@ define amdgpu_ps i32 @s_fshl_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <
 ;
 ; GFX8-LABEL: s_fshl_v2i16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_lshr_b32 s4, s1, 16
-; GFX8-NEXT:    s_and_b32 s1, 0xffff, s1
-; GFX8-NEXT:    s_lshr_b32 s5, s2, 16
-; GFX8-NEXT:    s_and_b32 s6, s2, 15
-; GFX8-NEXT:    s_andn2_b32 s2, 15, s2
-; GFX8-NEXT:    s_lshr_b32 s1, s1, 1
+; GFX8-NEXT:    s_and_b32 s5, s2, 15
 ; GFX8-NEXT:    s_lshr_b32 s3, s0, 16
-; GFX8-NEXT:    s_lshl_b32 s0, s0, s6
-; GFX8-NEXT:    s_lshr_b32 s1, s1, s2
-; GFX8-NEXT:    s_or_b32 s0, s0, s1
-; GFX8-NEXT:    s_and_b32 s1, s5, 15
-; GFX8-NEXT:    s_andn2_b32 s2, 15, s5
-; GFX8-NEXT:    s_lshl_b32 s1, s3, s1
-; GFX8-NEXT:    s_lshr_b32 s3, s4, 1
-; GFX8-NEXT:    s_lshr_b32 s2, s3, s2
-; GFX8-NEXT:    s_or_b32 s1, s1, s2
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s5
+; GFX8-NEXT:    s_and_b32 s5, 0xffff, s1
+; GFX8-NEXT:    s_lshr_b32 s4, s2, 16
+; GFX8-NEXT:    s_andn2_b32 s2, 15, s2
+; GFX8-NEXT:    s_lshr_b32 s5, s5, 1
+; GFX8-NEXT:    s_lshr_b32 s2, s5, s2
+; GFX8-NEXT:    s_or_b32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s2, s4, 15
+; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
+; GFX8-NEXT:    s_lshr_b32 s1, s1, 17
+; GFX8-NEXT:    s_lshl_b32 s2, s3, s2
+; GFX8-NEXT:    s_lshr_b32 s1, s1, s4
+; GFX8-NEXT:    s_or_b32 s1, s2, s1
 ; GFX8-NEXT:    s_and_b32 s1, 0xffff, s1
 ; GFX8-NEXT:    s_and_b32 s0, 0xffff, s0
 ; GFX8-NEXT:    s_lshl_b32 s1, s1, 16
@@ -3813,13 +3812,12 @@ define <2 x i16> @v_fshl_v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt) {
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v4, v5
 ; GFX8-NEXT:    v_or_b32_e32 v3, v3, v4
 ; GFX8-NEXT:    v_mov_b32_e32 v4, 15
-; GFX8-NEXT:    v_and_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_mov_b32_e32 v5, -1
+; GFX8-NEXT:    v_and_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_mov_b32_e32 v4, 1
 ; GFX8-NEXT:    v_and_b32_e32 v2, 15, v2
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 17, v1
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v1, v2, v1
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff, v0
@@ -3886,14 +3884,14 @@ define <2 x i16> @v_fshl_v2i16_4_8(<2 x i16> %lhs, <2 x i16> %rhs) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 4, v0
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v3, 12, v1
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v3
-; GFX8-NEXT:    v_mov_b32_e32 v3, 8
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 17, v1
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v2, 8, v2
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v1, 7, v1
 ; GFX8-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 4, v0
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v3
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -3964,11 +3962,10 @@ define amdgpu_ps float @v_fshl_v2i16_ssv(<2 x i16> inreg %lhs, <2 x i16> inreg %
 ; GFX8-NEXT:    v_or_b32_e32 v1, v1, v2
 ; GFX8-NEXT:    v_mov_b32_e32 v2, 15
 ; GFX8-NEXT:    v_mov_b32_e32 v3, -1
-; GFX8-NEXT:    s_lshr_b32 s3, s1, 16
 ; GFX8-NEXT:    v_and_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_and_b32_e32 v0, 15, v0
-; GFX8-NEXT:    s_lshr_b32 s0, s3, 1
+; GFX8-NEXT:    s_lshr_b32 s0, s1, 17
 ; GFX8-NEXT:    v_lshlrev_b16_e64 v2, v2, s2
 ; GFX8-NEXT:    v_lshrrev_b16_e64 v0, v0, s0
 ; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
@@ -4058,11 +4055,10 @@ define amdgpu_ps float @v_fshl_v2i16_svs(<2 x i16> inreg %lhs, <2 x i16> %rhs, <
 ; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
 ; GFX8-NEXT:    s_lshl_b32 s0, s0, s4
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v1, s1, v1
-; GFX8-NEXT:    v_mov_b32_e32 v2, 1
 ; GFX8-NEXT:    v_or_b32_e32 v1, s0, v1
 ; GFX8-NEXT:    s_and_b32 s0, s3, 15
 ; GFX8-NEXT:    s_andn2_b32 s1, 15, s3
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v0, 17, v0
 ; GFX8-NEXT:    s_lshl_b32 s0, s2, s0
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v0, s1, v0
 ; GFX8-NEXT:    v_or_b32_e32 v0, s0, v0
@@ -4142,21 +4138,20 @@ define amdgpu_ps float @v_fshl_v2i16_vss(<2 x i16> %lhs, <2 x i16> inreg %rhs, <
 ;
 ; GFX8-LABEL: v_fshl_v2i16_vss:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
-; GFX8-NEXT:    s_and_b32 s0, 0xffff, s0
-; GFX8-NEXT:    s_lshr_b32 s3, s1, 16
-; GFX8-NEXT:    s_and_b32 s4, s1, 15
+; GFX8-NEXT:    s_and_b32 s3, s1, 15
+; GFX8-NEXT:    v_lshlrev_b16_e32 v1, s3, v0
+; GFX8-NEXT:    s_and_b32 s3, 0xffff, s0
+; GFX8-NEXT:    s_lshr_b32 s2, s1, 16
 ; GFX8-NEXT:    s_andn2_b32 s1, 15, s1
-; GFX8-NEXT:    s_lshr_b32 s0, s0, 1
-; GFX8-NEXT:    v_lshlrev_b16_e32 v1, s4, v0
-; GFX8-NEXT:    s_lshr_b32 s0, s0, s1
-; GFX8-NEXT:    v_or_b32_e32 v1, s0, v1
-; GFX8-NEXT:    s_and_b32 s0, s3, 15
-; GFX8-NEXT:    s_andn2_b32 s1, 15, s3
-; GFX8-NEXT:    v_mov_b32_e32 v2, s0
-; GFX8-NEXT:    s_lshr_b32 s0, s2, 1
+; GFX8-NEXT:    s_lshr_b32 s3, s3, 1
+; GFX8-NEXT:    s_lshr_b32 s1, s3, s1
+; GFX8-NEXT:    v_or_b32_e32 v1, s1, v1
+; GFX8-NEXT:    s_and_b32 s1, s2, 15
+; GFX8-NEXT:    s_andn2_b32 s2, 15, s2
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    s_lshr_b32 s0, s0, 17
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    s_lshr_b32 s0, s0, s1
+; GFX8-NEXT:    s_lshr_b32 s0, s0, s2
 ; GFX8-NEXT:    v_or_b32_e32 v0, s0, v0
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff, v0
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
@@ -4256,23 +4251,22 @@ define amdgpu_ps i48 @s_fshl_v3i16(<3 x i16> inreg %lhs, <3 x i16> inreg %rhs, <
 ;
 ; GFX8-LABEL: s_fshl_v3i16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_lshr_b32 s7, s2, 16
-; GFX8-NEXT:    s_and_b32 s2, 0xffff, s2
-; GFX8-NEXT:    s_lshr_b32 s8, s4, 16
-; GFX8-NEXT:    s_and_b32 s9, s4, 15
-; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
-; GFX8-NEXT:    s_lshr_b32 s2, s2, 1
+; GFX8-NEXT:    s_and_b32 s8, s4, 15
 ; GFX8-NEXT:    s_lshr_b32 s6, s0, 16
-; GFX8-NEXT:    s_lshl_b32 s0, s0, s9
-; GFX8-NEXT:    s_lshr_b32 s2, s2, s4
-; GFX8-NEXT:    s_or_b32 s0, s0, s2
-; GFX8-NEXT:    s_and_b32 s2, s8, 15
-; GFX8-NEXT:    s_andn2_b32 s4, 15, s8
-; GFX8-NEXT:    s_lshl_b32 s2, s6, s2
-; GFX8-NEXT:    s_lshr_b32 s6, s7, 1
-; GFX8-NEXT:    s_lshr_b32 s4, s6, s4
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s8
+; GFX8-NEXT:    s_and_b32 s8, 0xffff, s2
+; GFX8-NEXT:    s_lshr_b32 s7, s4, 16
+; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
+; GFX8-NEXT:    s_lshr_b32 s8, s8, 1
+; GFX8-NEXT:    s_lshr_b32 s4, s8, s4
+; GFX8-NEXT:    s_or_b32 s0, s0, s4
+; GFX8-NEXT:    s_and_b32 s4, s7, 15
+; GFX8-NEXT:    s_andn2_b32 s7, 15, s7
+; GFX8-NEXT:    s_lshr_b32 s2, s2, 17
+; GFX8-NEXT:    s_lshl_b32 s4, s6, s4
+; GFX8-NEXT:    s_lshr_b32 s2, s2, s7
 ; GFX8-NEXT:    s_and_b32 s3, 0xffff, s3
-; GFX8-NEXT:    s_or_b32 s2, s2, s4
+; GFX8-NEXT:    s_or_b32 s2, s4, s2
 ; GFX8-NEXT:    s_and_b32 s4, s5, 15
 ; GFX8-NEXT:    s_andn2_b32 s5, 15, s5
 ; GFX8-NEXT:    s_lshr_b32 s3, s3, 1
@@ -4469,13 +4463,12 @@ define <3 x half> @v_fshl_v3i16(<3 x i16> %lhs, <3 x i16> %rhs, <3 x i16> %amt)
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v7, v7, v8
 ; GFX8-NEXT:    v_or_b32_e32 v6, v6, v7
 ; GFX8-NEXT:    v_mov_b32_e32 v7, 15
-; GFX8-NEXT:    v_and_b32_sdwa v7, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_mov_b32_e32 v8, -1
+; GFX8-NEXT:    v_and_b32_sdwa v7, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v4, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_mov_b32_e32 v7, 1
 ; GFX8-NEXT:    v_and_b32_e32 v4, 15, v4
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 17, v2
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v2, v4, v2
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_and_b32_e32 v2, 15, v5
@@ -4593,39 +4586,37 @@ define amdgpu_ps <2 x i32> @s_fshl_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
 ;
 ; GFX8-LABEL: s_fshl_v4i16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_lshr_b32 s8, s2, 16
-; GFX8-NEXT:    s_and_b32 s2, 0xffff, s2
-; GFX8-NEXT:    s_lshr_b32 s10, s4, 16
-; GFX8-NEXT:    s_and_b32 s12, s4, 15
-; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
-; GFX8-NEXT:    s_lshr_b32 s2, s2, 1
+; GFX8-NEXT:    s_and_b32 s10, s4, 15
 ; GFX8-NEXT:    s_lshr_b32 s6, s0, 16
-; GFX8-NEXT:    s_lshl_b32 s0, s0, s12
-; GFX8-NEXT:    s_lshr_b32 s2, s2, s4
-; GFX8-NEXT:    s_or_b32 s0, s0, s2
-; GFX8-NEXT:    s_and_b32 s2, s10, 15
-; GFX8-NEXT:    s_andn2_b32 s4, 15, s10
-; GFX8-NEXT:    s_lshl_b32 s2, s6, s2
-; GFX8-NEXT:    s_lshr_b32 s6, s8, 1
-; GFX8-NEXT:    s_lshr_b32 s9, s3, 16
-; GFX8-NEXT:    s_lshr_b32 s4, s6, s4
-; GFX8-NEXT:    s_and_b32 s3, 0xffff, s3
-; GFX8-NEXT:    s_lshr_b32 s11, s5, 16
-; GFX8-NEXT:    s_or_b32 s2, s2, s4
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s10
+; GFX8-NEXT:    s_and_b32 s10, 0xffff, s2
+; GFX8-NEXT:    s_lshr_b32 s8, s4, 16
+; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
+; GFX8-NEXT:    s_lshr_b32 s10, s10, 1
+; GFX8-NEXT:    s_lshr_b32 s4, s10, s4
+; GFX8-NEXT:    s_or_b32 s0, s0, s4
+; GFX8-NEXT:    s_and_b32 s4, s8, 15
+; GFX8-NEXT:    s_andn2_b32 s8, 15, s8
+; GFX8-NEXT:    s_lshr_b32 s2, s2, 17
+; GFX8-NEXT:    s_lshl_b32 s4, s6, s4
+; GFX8-NEXT:    s_lshr_b32 s2, s2, s8
+; GFX8-NEXT:    s_or_b32 s2, s4, s2
 ; GFX8-NEXT:    s_and_b32 s4, s5, 15
-; GFX8-NEXT:    s_andn2_b32 s5, 15, s5
-; GFX8-NEXT:    s_lshr_b32 s3, s3, 1
 ; GFX8-NEXT:    s_lshr_b32 s7, s1, 16
 ; GFX8-NEXT:    s_lshl_b32 s1, s1, s4
+; GFX8-NEXT:    s_and_b32 s4, 0xffff, s3
+; GFX8-NEXT:    s_lshr_b32 s9, s5, 16
+; GFX8-NEXT:    s_andn2_b32 s5, 15, s5
+; GFX8-NEXT:    s_lshr_b32 s4, s4, 1
+; GFX8-NEXT:    s_lshr_b32 s4, s4, s5
+; GFX8-NEXT:    s_or_b32 s1, s1, s4
+; GFX8-NEXT:    s_and_b32 s4, s9, 15
+; GFX8-NEXT:    s_andn2_b32 s5, 15, s9
+; GFX8-NEXT:    s_lshr_b32 s3, s3, 17
+; GFX8-NEXT:    s_lshl_b32 s4, s7, s4
 ; GFX8-NEXT:    s_lshr_b32 s3, s3, s5
-; GFX8-NEXT:    s_or_b32 s1, s1, s3
-; GFX8-NEXT:    s_and_b32 s3, s11, 15
-; GFX8-NEXT:    s_andn2_b32 s4, 15, s11
-; GFX8-NEXT:    s_lshr_b32 s5, s9, 1
-; GFX8-NEXT:    s_lshl_b32 s3, s7, s3
-; GFX8-NEXT:    s_lshr_b32 s4, s5, s4
 ; GFX8-NEXT:    s_and_b32 s2, 0xffff, s2
-; GFX8-NEXT:    s_or_b32 s3, s3, s4
+; GFX8-NEXT:    s_or_b32 s3, s4, s3
 ; GFX8-NEXT:    s_and_b32 s0, 0xffff, s0
 ; GFX8-NEXT:    s_lshl_b32 s2, s2, 16
 ; GFX8-NEXT:    s_or_b32 s0, s0, s2
@@ -4810,26 +4801,25 @@ define <4 x half> @v_fshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt)
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v7, v7, v8
 ; GFX8-NEXT:    v_or_b32_e32 v6, v6, v7
 ; GFX8-NEXT:    v_mov_b32_e32 v7, 15
-; GFX8-NEXT:    v_and_b32_sdwa v8, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_mov_b32_e32 v9, -1
+; GFX8-NEXT:    v_and_b32_sdwa v8, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v4, v4, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_mov_b32_e32 v8, 1
 ; GFX8-NEXT:    v_and_b32_e32 v4, 15, v4
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 17, v2
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v2, v4, v2
 ; GFX8-NEXT:    v_xor_b32_e32 v4, -1, v5
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_and_b32_e32 v2, 15, v5
 ; GFX8-NEXT:    v_and_b32_e32 v4, 15, v4
-; GFX8-NEXT:    v_lshrrev_b16_e32 v10, 1, v3
+; GFX8-NEXT:    v_lshrrev_b16_e32 v8, 1, v3
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v2, v2, v1
-; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v4, v10
+; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v4, v8
 ; GFX8-NEXT:    v_or_b32_e32 v2, v2, v4
 ; GFX8-NEXT:    v_and_b32_sdwa v4, v5, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v5, v5, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_and_b32_e32 v5, 15, v5
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 17, v3
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v3, v5, v3
 ; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
index 238cc06fc7f7c..4b1dd7d89ef61 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
@@ -3446,18 +3446,19 @@ define amdgpu_ps i32 @s_fshr_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <
 ;
 ; GFX8-LABEL: s_fshr_v2i16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_and_b32 s5, 0xffff, s1
+; GFX8-NEXT:    s_and_b32 s4, 0xffff, s1
 ; GFX8-NEXT:    s_lshr_b32 s3, s0, 16
-; GFX8-NEXT:    s_lshr_b32 s4, s1, 16
 ; GFX8-NEXT:    s_lshl_b32 s0, s0, 1
-; GFX8-NEXT:    s_lshr_b32 s5, s5, 15
-; GFX8-NEXT:    s_lshl_b32 s1, s1, 1
-; GFX8-NEXT:    s_or_b32 s0, s0, s5
+; GFX8-NEXT:    s_lshr_b32 s4, s4, 15
+; GFX8-NEXT:    s_or_b32 s0, s0, s4
+; GFX8-NEXT:    s_lshr_b32 s4, s1, 17
 ; GFX8-NEXT:    s_lshl_b32 s3, s3, 1
-; GFX8-NEXT:    s_lshr_b32 s5, s4, 15
+; GFX8-NEXT:    s_lshr_b32 s4, s4, 14
+; GFX8-NEXT:    s_or_b32 s3, s3, s4
+; GFX8-NEXT:    s_lshr_b32 s4, s1, 16
+; GFX8-NEXT:    s_lshl_b32 s1, s1, 1
 ; GFX8-NEXT:    s_xor_b32 s2, s2, -1
 ; GFX8-NEXT:    s_and_b32 s1, 0xffff, s1
-; GFX8-NEXT:    s_or_b32 s3, s3, s5
 ; GFX8-NEXT:    s_lshr_b32 s5, s2, 16
 ; GFX8-NEXT:    s_and_b32 s6, s2, 15
 ; GFX8-NEXT:    s_andn2_b32 s2, 15, s2
@@ -3592,22 +3593,23 @@ define <2 x i16> @v_fshr_v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt) {
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v4, 15, v1
 ; GFX8-NEXT:    v_or_b32_e32 v3, v3, v4
 ; GFX8-NEXT:    v_mov_b32_e32 v4, 1
-; GFX8-NEXT:    v_mov_b32_e32 v5, 15
+; GFX8-NEXT:    v_lshrrev_b32_e32 v5, 17, v1
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v5, 14, v5
 ; GFX8-NEXT:    v_xor_b32_e32 v2, -1, v2
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v6
-; GFX8-NEXT:    v_lshlrev_b16_e32 v6, 1, v1
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v5
+; GFX8-NEXT:    v_lshlrev_b16_e32 v5, 1, v1
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
 ; GFX8-NEXT:    v_and_b32_e32 v4, 15, v2
-; GFX8-NEXT:    v_xor_b32_e32 v7, -1, v2
-; GFX8-NEXT:    v_and_b32_e32 v7, 15, v7
+; GFX8-NEXT:    v_xor_b32_e32 v6, -1, v2
+; GFX8-NEXT:    v_and_b32_e32 v6, 15, v6
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v3, v4, v3
-; GFX8-NEXT:    v_lshrrev_b16_e32 v4, 1, v6
-; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v7, v4
+; GFX8-NEXT:    v_lshrrev_b16_e32 v4, 1, v5
+; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v6, v4
 ; GFX8-NEXT:    v_or_b32_e32 v3, v3, v4
-; GFX8-NEXT:    v_and_b32_sdwa v4, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mov_b32_e32 v4, 15
 ; GFX8-NEXT:    v_mov_b32_e32 v5, -1
+; GFX8-NEXT:    v_and_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_and_b32_e32 v2, 15, v2
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v1, 1, v1
@@ -3678,14 +3680,14 @@ define <2 x i16> @v_fshr_v2i16_4_8(<2 x i16> %lhs, <2 x i16> %rhs) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 12, v0
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v3, 4, v1
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v3
-; GFX8-NEXT:    v_mov_b32_e32 v3, 8
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 17, v1
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v2, 8, v2
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v1, 7, v1
 ; GFX8-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 12, v0
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v3
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -3760,13 +3762,17 @@ define amdgpu_ps float @v_fshr_v2i16_ssv(<2 x i16> inreg %lhs, <2 x i16> inreg %
 ;
 ; GFX8-LABEL: v_fshr_v2i16_ssv:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_and_b32 s4, 0xffff, s1
+; GFX8-NEXT:    s_and_b32 s3, 0xffff, s1
 ; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
 ; GFX8-NEXT:    s_lshl_b32 s0, s0, 1
-; GFX8-NEXT:    s_lshr_b32 s4, s4, 15
+; GFX8-NEXT:    s_lshr_b32 s3, s3, 15
+; GFX8-NEXT:    s_or_b32 s0, s0, s3
+; GFX8-NEXT:    s_lshr_b32 s3, s1, 17
+; GFX8-NEXT:    s_lshl_b32 s2, s2, 1
+; GFX8-NEXT:    s_lshr_b32 s3, s3, 14
 ; GFX8-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX8-NEXT:    s_or_b32 s2, s2, s3
 ; GFX8-NEXT:    s_lshr_b32 s3, s1, 16
-; GFX8-NEXT:    s_or_b32 s0, s0, s4
 ; GFX8-NEXT:    s_lshl_b32 s1, s1, 1
 ; GFX8-NEXT:    v_and_b32_e32 v1, 15, v0
 ; GFX8-NEXT:    v_xor_b32_e32 v2, -1, v0
@@ -3775,16 +3781,13 @@ define amdgpu_ps float @v_fshr_v2i16_ssv(<2 x i16> inreg %lhs, <2 x i16> inreg %
 ; GFX8-NEXT:    v_and_b32_e32 v2, 15, v2
 ; GFX8-NEXT:    s_lshr_b32 s0, s0, 1
 ; GFX8-NEXT:    v_lshrrev_b16_e64 v2, v2, s0
-; GFX8-NEXT:    s_lshr_b32 s4, s3, 15
 ; GFX8-NEXT:    s_lshl_b32 s3, s3, 1
 ; GFX8-NEXT:    v_or_b32_e32 v1, v1, v2
 ; GFX8-NEXT:    v_mov_b32_e32 v2, 15
 ; GFX8-NEXT:    v_mov_b32_e32 v3, -1
-; GFX8-NEXT:    s_lshl_b32 s2, s2, 1
 ; GFX8-NEXT:    v_and_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    s_and_b32 s0, 0xffff, s3
-; GFX8-NEXT:    s_or_b32 s2, s2, s4
 ; GFX8-NEXT:    v_and_b32_e32 v0, 15, v0
 ; GFX8-NEXT:    s_lshr_b32 s0, s0, 1
 ; GFX8-NEXT:    v_lshlrev_b16_e64 v2, v2, s2
@@ -3883,10 +3886,10 @@ define amdgpu_ps float @v_fshr_v2i16_svs(<2 x i16> inreg %lhs, <2 x i16> %rhs, <
 ; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
 ; GFX8-NEXT:    s_lshl_b32 s0, s0, 1
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v1, 15, v0
-; GFX8-NEXT:    v_mov_b32_e32 v2, 15
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 17, v0
 ; GFX8-NEXT:    v_or_b32_e32 v1, s0, v1
 ; GFX8-NEXT:    s_lshl_b32 s0, s2, 1
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v2, 14, v2
 ; GFX8-NEXT:    v_or_b32_e32 v2, s0, v2
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v3, 1, v0
 ; GFX8-NEXT:    v_mov_b32_e32 v4, 1
@@ -4005,18 +4008,19 @@ define amdgpu_ps float @v_fshr_v2i16_vss(<2 x i16> %lhs, <2 x i16> inreg %rhs, <
 ;
 ; GFX8-LABEL: v_fshr_v2i16_vss:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_and_b32 s3, 0xffff, s0
-; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX8-NEXT:    s_and_b32 s2, 0xffff, s0
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v1, 1, v0
-; GFX8-NEXT:    s_lshr_b32 s3, s3, 15
+; GFX8-NEXT:    s_lshr_b32 s2, s2, 15
+; GFX8-NEXT:    v_or_b32_e32 v1, s2, v1
 ; GFX8-NEXT:    v_mov_b32_e32 v2, 1
-; GFX8-NEXT:    s_lshl_b32 s0, s0, 1
-; GFX8-NEXT:    v_or_b32_e32 v1, s3, v1
+; GFX8-NEXT:    s_lshr_b32 s2, s0, 17
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    s_lshr_b32 s3, s2, 15
+; GFX8-NEXT:    s_lshr_b32 s2, s2, 14
+; GFX8-NEXT:    v_or_b32_e32 v0, s2, v0
+; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX8-NEXT:    s_lshl_b32 s0, s0, 1
 ; GFX8-NEXT:    s_xor_b32 s1, s1, -1
 ; GFX8-NEXT:    s_and_b32 s0, 0xffff, s0
-; GFX8-NEXT:    v_or_b32_e32 v0, s3, v0
 ; GFX8-NEXT:    s_lshr_b32 s3, s1, 16
 ; GFX8-NEXT:    s_and_b32 s4, s1, 15
 ; GFX8-NEXT:    s_andn2_b32 s1, 15, s1
@@ -4143,13 +4147,14 @@ define amdgpu_ps i48 @s_fshr_v3i16(<3 x i16> inreg %lhs, <3 x i16> inreg %rhs, <
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_and_b32 s8, 0xffff, s2
 ; GFX8-NEXT:    s_lshr_b32 s6, s0, 16
-; GFX8-NEXT:    s_lshr_b32 s7, s2, 16
 ; GFX8-NEXT:    s_lshl_b32 s0, s0, 1
 ; GFX8-NEXT:    s_lshr_b32 s8, s8, 15
-; GFX8-NEXT:    s_lshl_b32 s2, s2, 1
+; GFX8-NEXT:    s_lshr_b32 s7, s2, 16
 ; GFX8-NEXT:    s_or_b32 s0, s0, s8
+; GFX8-NEXT:    s_lshr_b32 s8, s2, 17
+; GFX8-NEXT:    s_lshl_b32 s2, s2, 1
 ; GFX8-NEXT:    s_lshl_b32 s6, s6, 1
-; GFX8-NEXT:    s_lshr_b32 s8, s7, 15
+; GFX8-NEXT:    s_lshr_b32 s8, s8, 14
 ; GFX8-NEXT:    s_xor_b32 s4, s4, -1
 ; GFX8-NEXT:    s_and_b32 s2, 0xffff, s2
 ; GFX8-NEXT:    s_or_b32 s6, s6, s8
@@ -4385,22 +4390,23 @@ define <3 x half> @v_fshr_v3i16(<3 x i16> %lhs, <3 x i16> %rhs, <3 x i16> %amt)
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v7, 15, v2
 ; GFX8-NEXT:    v_or_b32_e32 v6, v6, v7
 ; GFX8-NEXT:    v_mov_b32_e32 v7, 1
-; GFX8-NEXT:    v_mov_b32_e32 v8, 15
+; GFX8-NEXT:    v_lshrrev_b32_e32 v8, 17, v2
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v9, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v8, 14, v8
 ; GFX8-NEXT:    v_xor_b32_e32 v4, -1, v4
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v9
-; GFX8-NEXT:    v_lshlrev_b16_e32 v9, 1, v2
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX8-NEXT:    v_lshlrev_b16_e32 v8, 1, v2
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
 ; GFX8-NEXT:    v_and_b32_e32 v7, 15, v4
-; GFX8-NEXT:    v_xor_b32_e32 v10, -1, v4
-; GFX8-NEXT:    v_and_b32_e32 v10, 15, v10
+; GFX8-NEXT:    v_xor_b32_e32 v9, -1, v4
+; GFX8-NEXT:    v_and_b32_e32 v9, 15, v9
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v6, v7, v6
-; GFX8-NEXT:    v_lshrrev_b16_e32 v7, 1, v9
-; GFX8-NEXT:    v_lshrrev_b16_e32 v7, v10, v7
+; GFX8-NEXT:    v_lshrrev_b16_e32 v7, 1, v8
+; GFX8-NEXT:    v_lshrrev_b16_e32 v7, v9, v7
 ; GFX8-NEXT:    v_or_b32_e32 v6, v6, v7
-; GFX8-NEXT:    v_and_b32_sdwa v7, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_mov_b32_e32 v7, 15
 ; GFX8-NEXT:    v_mov_b32_e32 v8, -1
+; GFX8-NEXT:    v_and_b32_sdwa v7, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v4, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_and_b32_e32 v4, 15, v4
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v2, 1, v2
@@ -4557,18 +4563,19 @@ define amdgpu_ps <2 x i32> @s_fshr_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
 ;
 ; GFX8-LABEL: s_fshr_v4i16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_and_b32 s8, 0xffff, s2
+; GFX8-NEXT:    s_and_b32 s7, 0xffff, s2
 ; GFX8-NEXT:    s_lshr_b32 s6, s0, 16
-; GFX8-NEXT:    s_lshr_b32 s7, s2, 16
 ; GFX8-NEXT:    s_lshl_b32 s0, s0, 1
-; GFX8-NEXT:    s_lshr_b32 s8, s8, 15
-; GFX8-NEXT:    s_lshl_b32 s2, s2, 1
-; GFX8-NEXT:    s_or_b32 s0, s0, s8
+; GFX8-NEXT:    s_lshr_b32 s7, s7, 15
+; GFX8-NEXT:    s_or_b32 s0, s0, s7
+; GFX8-NEXT:    s_lshr_b32 s7, s2, 17
 ; GFX8-NEXT:    s_lshl_b32 s6, s6, 1
-; GFX8-NEXT:    s_lshr_b32 s8, s7, 15
+; GFX8-NEXT:    s_lshr_b32 s7, s7, 14
+; GFX8-NEXT:    s_or_b32 s6, s6, s7
+; GFX8-NEXT:    s_lshr_b32 s7, s2, 16
+; GFX8-NEXT:    s_lshl_b32 s2, s2, 1
 ; GFX8-NEXT:    s_xor_b32 s4, s4, -1
 ; GFX8-NEXT:    s_and_b32 s2, 0xffff, s2
-; GFX8-NEXT:    s_or_b32 s6, s6, s8
 ; GFX8-NEXT:    s_lshr_b32 s8, s4, 16
 ; GFX8-NEXT:    s_and_b32 s9, s4, 15
 ; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
@@ -4587,19 +4594,20 @@ define amdgpu_ps <2 x i32> @s_fshr_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
 ; GFX8-NEXT:    s_and_b32 s2, 0xffff, s2
 ; GFX8-NEXT:    s_and_b32 s0, 0xffff, s0
 ; GFX8-NEXT:    s_lshl_b32 s2, s2, 16
-; GFX8-NEXT:    s_and_b32 s6, 0xffff, s3
+; GFX8-NEXT:    s_and_b32 s4, 0xffff, s3
 ; GFX8-NEXT:    s_or_b32 s0, s0, s2
 ; GFX8-NEXT:    s_lshr_b32 s2, s1, 16
-; GFX8-NEXT:    s_lshr_b32 s4, s3, 16
 ; GFX8-NEXT:    s_lshl_b32 s1, s1, 1
-; GFX8-NEXT:    s_lshr_b32 s6, s6, 15
-; GFX8-NEXT:    s_lshl_b32 s3, s3, 1
-; GFX8-NEXT:    s_or_b32 s1, s1, s6
+; GFX8-NEXT:    s_lshr_b32 s4, s4, 15
+; GFX8-NEXT:    s_or_b32 s1, s1, s4
+; GFX8-NEXT:    s_lshr_b32 s4, s3, 17
 ; GFX8-NEXT:    s_lshl_b32 s2, s2, 1
-; GFX8-NEXT:    s_lshr_b32 s6, s4, 15
+; GFX8-NEXT:    s_lshr_b32 s4, s4, 14
+; GFX8-NEXT:    s_or_b32 s2, s2, s4
+; GFX8-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX8-NEXT:    s_lshl_b32 s3, s3, 1
 ; GFX8-NEXT:    s_xor_b32 s5, s5, -1
 ; GFX8-NEXT:    s_and_b32 s3, 0xffff, s3
-; GFX8-NEXT:    s_or_b32 s2, s2, s6
 ; GFX8-NEXT:    s_lshr_b32 s6, s5, 16
 ; GFX8-NEXT:    s_and_b32 s7, s5, 15
 ; GFX8-NEXT:    s_andn2_b32 s5, 15, s5
@@ -4816,21 +4824,22 @@ define <4 x half> @v_fshr_v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt)
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v7, 15, v2
 ; GFX8-NEXT:    v_or_b32_e32 v6, v6, v7
 ; GFX8-NEXT:    v_mov_b32_e32 v7, 1
-; GFX8-NEXT:    v_mov_b32_e32 v8, 15
+; GFX8-NEXT:    v_lshrrev_b32_e32 v8, 17, v2
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v9, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v8, 14, v8
 ; GFX8-NEXT:    v_xor_b32_e32 v4, -1, v4
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v9
-; GFX8-NEXT:    v_lshlrev_b16_e32 v9, 1, v2
-; GFX8-NEXT:    v_xor_b32_e32 v11, -1, v4
-; GFX8-NEXT:    v_and_b32_e32 v10, 15, v4
-; GFX8-NEXT:    v_and_b32_e32 v11, 15, v11
-; GFX8-NEXT:    v_lshrrev_b16_e32 v9, 1, v9
-; GFX8-NEXT:    v_lshlrev_b16_e32 v6, v10, v6
-; GFX8-NEXT:    v_lshrrev_b16_e32 v9, v11, v9
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX8-NEXT:    v_lshlrev_b16_e32 v8, 1, v2
+; GFX8-NEXT:    v_xor_b32_e32 v10, -1, v4
+; GFX8-NEXT:    v_and_b32_e32 v9, 15, v4
+; GFX8-NEXT:    v_and_b32_e32 v10, 15, v10
+; GFX8-NEXT:    v_lshrrev_b16_e32 v8, 1, v8
+; GFX8-NEXT:    v_lshlrev_b16_e32 v6, v9, v6
+; GFX8-NEXT:    v_lshrrev_b16_e32 v8, v10, v8
+; GFX8-NEXT:    v_or_b32_e32 v6, v6, v8
+; GFX8-NEXT:    v_mov_b32_e32 v8, 15
 ; GFX8-NEXT:    v_mov_b32_e32 v10, -1
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_or_b32_e32 v6, v6, v9
 ; GFX8-NEXT:    v_and_b32_sdwa v9, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_and_b32_e32 v4, 15, v4
@@ -4840,10 +4849,11 @@ define <4 x half> @v_fshr_v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt)
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v2, 1, v1
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v4, 15, v3
-; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff, v0
 ; GFX8-NEXT:    v_or_b32_e32 v2, v2, v4
+; GFX8-NEXT:    v_lshrrev_b32_e32 v4, 17, v3
+; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff, v0
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v1, v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v4, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v4, 14, v4
 ; GFX8-NEXT:    v_xor_b32_e32 v5, -1, v5
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX8-NEXT:    v_or_b32_e32 v1, v1, v4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
index 6baa10bb48621..8533e34ff13f8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
@@ -807,10 +807,10 @@ define <2 x i16> @v_lshr_v2i16_15(<2 x i16> %value) {
 ; GFX8-LABEL: v_lshr_v2i16_15:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_mov_b32_e32 v2, 15
-; GFX8-NEXT:    v_lshrrev_b16_e32 v1, 15, v0
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    v_mov_b32_e32 v1, 31
+; GFX8-NEXT:    v_lshrrev_b32_sdwa v1, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 15, v0
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_lshr_v2i16_15:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll
index 38ef707fa65a2..3685eed5043a3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll
@@ -71,14 +71,14 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX9-NEXT:    s_load_dword s6, s[4:5], 0x0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_and_b32 s5, 0xffff, s0
-; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s6
-; GFX9-NEXT:    s_lshr_b32 s0, s5, 8
+; GFX9-NEXT:    s_lshr_b32 s5, s5, 8
+; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX9-NEXT:    ds_write_b8 v1, v0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:1
-; GFX9-NEXT:    s_lshr_b32 s0, s4, 8
+; GFX9-NEXT:    s_lshr_b32 s0, s0, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:2
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
@@ -90,7 +90,7 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:4
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:5
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s1, s1, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:6
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
@@ -102,7 +102,7 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:8
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:9
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s1, s2, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:10
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
@@ -114,7 +114,7 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:12
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:13
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s1, s3, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:14
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
@@ -181,37 +181,37 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX10-NEXT:    s_load_dword s6, s[4:5], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_lshr_b32 s4, s0, 16
-; GFX10-NEXT:    s_and_b32 s5, 0xffff, s0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s6
-; GFX10-NEXT:    s_lshr_b32 s0, s1, 16
-; GFX10-NEXT:    s_and_b32 s6, 0xffff, s1
+; GFX10-NEXT:    s_and_b32 s5, 0xffff, s0
+; GFX10-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX10-NEXT:    s_and_b32 s7, 0xffff, s1
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s1
-; GFX10-NEXT:    s_lshr_b32 s1, s2, 16
-; GFX10-NEXT:    s_and_b32 s7, 0xffff, s2
-; GFX10-NEXT:    v_mov_b32_e32 v3, s2
-; GFX10-NEXT:    s_lshr_b32 s2, s5, 8
+; GFX10-NEXT:    s_lshr_b32 s6, s1, 16
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s4
-; GFX10-NEXT:    s_lshr_b32 s5, s4, 8
-; GFX10-NEXT:    s_lshr_b32 s4, s6, 8
-; GFX10-NEXT:    s_lshr_b32 s6, s0, 8
+; GFX10-NEXT:    s_lshr_b32 s1, s1, 24
+; GFX10-NEXT:    s_lshr_b32 s8, s2, 16
+; GFX10-NEXT:    s_and_b32 s9, 0xffff, s2
+; GFX10-NEXT:    s_lshr_b32 s5, s5, 8
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s0
-; GFX10-NEXT:    v_mov_b32_e32 v6, s2
 ; GFX10-NEXT:    s_lshr_b32 s0, s7, 8
-; GFX10-NEXT:    v_mov_b32_e32 v7, s5
-; GFX10-NEXT:    v_mov_b32_e32 v8, s4
-; GFX10-NEXT:    v_mov_b32_e32 v9, s6
+; GFX10-NEXT:    v_mov_b32_e32 v6, s6
+; GFX10-NEXT:    v_mov_b32_e32 v7, s1
+; GFX10-NEXT:    s_lshr_b32 s1, s9, 8
+; GFX10-NEXT:    v_mov_b32_e32 v8, s5
+; GFX10-NEXT:    v_mov_b32_e32 v9, s0
 ; GFX10-NEXT:    ds_write_b8 v1, v0
 ; GFX10-NEXT:    ds_write_b8 v1, v2 offset:4
 ; GFX10-NEXT:    ds_write_b8 v1, v4 offset:2
-; GFX10-NEXT:    ds_write_b8 v1, v5 offset:6
-; GFX10-NEXT:    ds_write_b8 v1, v6 offset:1
-; GFX10-NEXT:    ds_write_b8 v1, v7 offset:3
-; GFX10-NEXT:    ds_write_b8 v1, v8 offset:5
-; GFX10-NEXT:    v_mov_b32_e32 v0, s1
-; GFX10-NEXT:    v_mov_b32_e32 v10, s0
-; GFX10-NEXT:    s_lshr_b32 s0, s1, 8
-; GFX10-NEXT:    ds_write_b8 v1, v9 offset:7
+; GFX10-NEXT:    ds_write_b8 v1, v5 offset:3
+; GFX10-NEXT:    ds_write_b8 v1, v6 offset:6
+; GFX10-NEXT:    ds_write_b8 v1, v8 offset:1
+; GFX10-NEXT:    ds_write_b8 v1, v9 offset:5
+; GFX10-NEXT:    v_mov_b32_e32 v0, s8
+; GFX10-NEXT:    v_mov_b32_e32 v3, s2
+; GFX10-NEXT:    v_mov_b32_e32 v10, s1
+; GFX10-NEXT:    s_lshr_b32 s0, s2, 24
+; GFX10-NEXT:    ds_write_b8 v1, v7 offset:7
 ; GFX10-NEXT:    ds_write_b8 v1, v3 offset:8
 ; GFX10-NEXT:    ds_write_b8 v1, v10 offset:9
 ; GFX10-NEXT:    ds_write_b8 v1, v0 offset:10
@@ -221,7 +221,7 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX10-NEXT:    s_lshr_b32 s0, s0, 8
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s0
-; GFX10-NEXT:    s_lshr_b32 s0, s1, 8
+; GFX10-NEXT:    s_lshr_b32 s0, s3, 24
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s1
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s0
 ; GFX10-NEXT:    ds_write_b8 v1, v0 offset:11
@@ -240,38 +240,37 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX11-NEXT:    s_and_b32 s6, 0xffff, s0
 ; GFX11-NEXT:    s_lshr_b32 s5, s0, 16
 ; GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s4
-; GFX11-NEXT:    s_lshr_b32 s0, s1, 16
-; GFX11-NEXT:    s_and_b32 s4, 0xffff, s1
+; GFX11-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX11-NEXT:    s_lshr_b32 s4, s1, 16
+; GFX11-NEXT:    s_and_b32 s7, 0xffff, s1
+; GFX11-NEXT:    s_lshr_b32 s6, s6, 8
 ; GFX11-NEXT:    v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v3, s2
-; GFX11-NEXT:    s_lshr_b32 s1, s2, 16
-; GFX11-NEXT:    s_and_b32 s7, 0xffff, s2
-; GFX11-NEXT:    s_lshr_b32 s2, s6, 8
-; GFX11-NEXT:    s_lshr_b32 s6, s5, 8
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    v_dual_mov_b32 v6, s2 :: v_dual_mov_b32 v7, s6
+; GFX11-NEXT:    s_lshr_b32 s1, s1, 24
 ; GFX11-NEXT:    v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s0
-; GFX11-NEXT:    s_lshr_b32 s4, s4, 8
-; GFX11-NEXT:    s_lshr_b32 s5, s0, 8
 ; GFX11-NEXT:    s_lshr_b32 s0, s7, 8
-; GFX11-NEXT:    v_dual_mov_b32 v8, s4 :: v_dual_mov_b32 v9, s5
+; GFX11-NEXT:    v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s6
+; GFX11-NEXT:    s_and_b32 s9, 0xffff, s2
+; GFX11-NEXT:    v_dual_mov_b32 v8, s1 :: v_dual_mov_b32 v9, s0
+; GFX11-NEXT:    s_lshr_b32 s0, s2, 24
+; GFX11-NEXT:    s_lshr_b32 s1, s9, 8
 ; GFX11-NEXT:    ds_store_b8 v1, v0
-; GFX11-NEXT:    ds_store_b8 v1, v6 offset:1
+; GFX11-NEXT:    ds_store_b8 v1, v7 offset:1
 ; GFX11-NEXT:    ds_store_b8 v1, v4 offset:2
-; GFX11-NEXT:    ds_store_b8 v1, v7 offset:3
+; GFX11-NEXT:    ds_store_b8 v1, v5 offset:3
 ; GFX11-NEXT:    ds_store_b8 v1, v2 offset:4
-; GFX11-NEXT:    ds_store_b8 v1, v8 offset:5
-; GFX11-NEXT:    ds_store_b8 v1, v5 offset:6
-; GFX11-NEXT:    ds_store_b8 v1, v9 offset:7
-; GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s3
-; GFX11-NEXT:    s_lshr_b32 s0, s1, 8
-; GFX11-NEXT:    v_mov_b32_e32 v2, s1
+; GFX11-NEXT:    ds_store_b8 v1, v9 offset:5
+; GFX11-NEXT:    ds_store_b8 v1, v6 offset:6
+; GFX11-NEXT:    ds_store_b8 v1, v8 offset:7
 ; GFX11-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX11-NEXT:    s_and_b32 s0, 0xffff, s3
-; GFX11-NEXT:    s_lshr_b32 s1, s3, 16
+; GFX11-NEXT:    s_lshr_b32 s8, s2, 16
+; GFX11-NEXT:    v_dual_mov_b32 v0, s1 :: v_dual_mov_b32 v5, s3
 ; GFX11-NEXT:    s_lshr_b32 s0, s0, 8
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    v_dual_mov_b32 v7, s1 :: v_dual_mov_b32 v6, s0
-; GFX11-NEXT:    s_lshr_b32 s0, s1, 8
+; GFX11-NEXT:    s_lshr_b32 s1, s3, 16
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v7, s1
+; GFX11-NEXT:    v_mov_b32_e32 v6, s0
+; GFX11-NEXT:    s_lshr_b32 s0, s3, 24
 ; GFX11-NEXT:    v_mov_b32_e32 v8, s0
 ; GFX11-NEXT:    ds_store_b8 v1, v3 offset:8
 ; GFX11-NEXT:    ds_store_b8 v1, v0 offset:9
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll
index 1d2d330eeb61a..cce6bd9301cbf 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll
@@ -72,15 +72,15 @@ define amdgpu_kernel void @store_lds_v3i32_align1(ptr addrspace(3) %out, <3 x i3
 ; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x0
 ; GFX9-NEXT:    ; kill: killed $sgpr4_sgpr5
 ; GFX9-NEXT:    s_and_b32 s5, 0xffff, s0
-; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-NEXT:    s_lshr_b32 s0, s5, 8
+; GFX9-NEXT:    s_lshr_b32 s3, s5, 8
+; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX9-NEXT:    ds_write_b8 v1, v0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:1
-; GFX9-NEXT:    s_lshr_b32 s0, s4, 8
+; GFX9-NEXT:    s_lshr_b32 s0, s0, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:2
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
@@ -92,7 +92,7 @@ define amdgpu_kernel void @store_lds_v3i32_align1(ptr addrspace(3) %out, <3 x i3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:4
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:5
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s1, s1, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:6
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
@@ -104,7 +104,7 @@ define amdgpu_kernel void @store_lds_v3i32_align1(ptr addrspace(3) %out, <3 x i3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:8
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:9
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s1, s2, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:10
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
@@ -163,37 +163,37 @@ define amdgpu_kernel void @store_lds_v3i32_align1(ptr addrspace(3) %out, <3 x i3
 ; GFX10-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX10-NEXT:    s_and_b32 s5, 0xffff, s0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s0
-; GFX10-NEXT:    s_lshr_b32 s0, s1, 16
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s3
-; GFX10-NEXT:    s_and_b32 s3, 0xffff, s1
+; GFX10-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX10-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX10-NEXT:    s_and_b32 s6, 0xffff, s1
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s1
-; GFX10-NEXT:    s_lshr_b32 s1, s2, 16
-; GFX10-NEXT:    s_and_b32 s6, 0xffff, s2
-; GFX10-NEXT:    v_mov_b32_e32 v3, s2
-; GFX10-NEXT:    s_lshr_b32 s2, s5, 8
-; GFX10-NEXT:    s_lshr_b32 s5, s4, 8
+; GFX10-NEXT:    s_lshr_b32 s1, s1, 24
+; GFX10-NEXT:    s_and_b32 s8, 0xffff, s2
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s4
-; GFX10-NEXT:    s_lshr_b32 s4, s0, 8
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s0
 ; GFX10-NEXT:    s_lshr_b32 s0, s6, 8
-; GFX10-NEXT:    v_mov_b32_e32 v9, s4
-; GFX10-NEXT:    s_lshr_b32 s3, s3, 8
-; GFX10-NEXT:    v_mov_b32_e32 v6, s2
-; GFX10-NEXT:    v_mov_b32_e32 v10, s0
-; GFX10-NEXT:    s_lshr_b32 s0, s1, 8
-; GFX10-NEXT:    v_mov_b32_e32 v7, s5
-; GFX10-NEXT:    v_mov_b32_e32 v8, s3
+; GFX10-NEXT:    v_mov_b32_e32 v7, s1
+; GFX10-NEXT:    s_lshr_b32 s1, s8, 8
+; GFX10-NEXT:    s_lshr_b32 s7, s2, 16
+; GFX10-NEXT:    v_mov_b32_e32 v3, s2
+; GFX10-NEXT:    s_lshr_b32 s5, s5, 8
+; GFX10-NEXT:    v_mov_b32_e32 v6, s3
+; GFX10-NEXT:    v_mov_b32_e32 v9, s0
+; GFX10-NEXT:    v_mov_b32_e32 v10, s1
+; GFX10-NEXT:    s_lshr_b32 s0, s2, 24
+; GFX10-NEXT:    v_mov_b32_e32 v8, s5
 ; GFX10-NEXT:    ds_write_b8 v1, v0
 ; GFX10-NEXT:    ds_write_b8 v1, v2 offset:4
 ; GFX10-NEXT:    ds_write_b8 v1, v4 offset:2
-; GFX10-NEXT:    ds_write_b8 v1, v5 offset:6
-; GFX10-NEXT:    ds_write_b8 v1, v6 offset:1
-; GFX10-NEXT:    ds_write_b8 v1, v7 offset:3
-; GFX10-NEXT:    ds_write_b8 v1, v8 offset:5
-; GFX10-NEXT:    v_mov_b32_e32 v0, s1
+; GFX10-NEXT:    ds_write_b8 v1, v5 offset:3
+; GFX10-NEXT:    ds_write_b8 v1, v6 offset:6
+; GFX10-NEXT:    ds_write_b8 v1, v8 offset:1
+; GFX10-NEXT:    ds_write_b8 v1, v9 offset:5
+; GFX10-NEXT:    v_mov_b32_e32 v0, s7
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s0
-; GFX10-NEXT:    ds_write_b8 v1, v9 offset:7
+; GFX10-NEXT:    ds_write_b8 v1, v7 offset:7
 ; GFX10-NEXT:    ds_write_b8 v1, v3 offset:8
 ; GFX10-NEXT:    ds_write_b8 v1, v10 offset:9
 ; GFX10-NEXT:    ds_write_b8 v1, v0 offset:10
@@ -206,37 +206,37 @@ define amdgpu_kernel void @store_lds_v3i32_align1(ptr addrspace(3) %out, <3 x i3
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_load_b32 s3, s[4:5], 0x0
 ; GFX11-NEXT:    s_and_b32 s5, 0xffff, s0
+; GFX11-NEXT:    s_lshr_b32 s7, s2, 16
+; GFX11-NEXT:    s_lshr_b32 s5, s5, 8
 ; GFX11-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s3
-; GFX11-NEXT:    s_lshr_b32 s0, s1, 16
-; GFX11-NEXT:    s_and_b32 s3, 0xffff, s1
+; GFX11-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX11-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX11-NEXT:    s_and_b32 s6, 0xffff, s1
 ; GFX11-NEXT:    v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v3, s2
-; GFX11-NEXT:    s_lshr_b32 s1, s2, 16
-; GFX11-NEXT:    s_and_b32 s6, 0xffff, s2
-; GFX11-NEXT:    s_lshr_b32 s2, s5, 8
-; GFX11-NEXT:    s_lshr_b32 s5, s4, 8
-; GFX11-NEXT:    v_dual_mov_b32 v6, s1 :: v_dual_mov_b32 v7, s2
+; GFX11-NEXT:    s_lshr_b32 s1, s1, 24
+; GFX11-NEXT:    s_and_b32 s8, 0xffff, s2
+; GFX11-NEXT:    v_dual_mov_b32 v8, s7 :: v_dual_mov_b32 v9, s5
 ; GFX11-NEXT:    v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s0
-; GFX11-NEXT:    s_lshr_b32 s3, s3, 8
-; GFX11-NEXT:    s_lshr_b32 s4, s0, 8
+; GFX11-NEXT:    s_lshr_b32 s2, s2, 24
 ; GFX11-NEXT:    s_lshr_b32 s0, s6, 8
-; GFX11-NEXT:    s_lshr_b32 s6, s1, 8
-; GFX11-NEXT:    v_dual_mov_b32 v8, s5 :: v_dual_mov_b32 v9, s3
-; GFX11-NEXT:    v_dual_mov_b32 v10, s4 :: v_dual_mov_b32 v11, s0
-; GFX11-NEXT:    v_mov_b32_e32 v12, s6
+; GFX11-NEXT:    v_dual_mov_b32 v6, s3 :: v_dual_mov_b32 v7, s1
+; GFX11-NEXT:    s_lshr_b32 s1, s8, 8
+; GFX11-NEXT:    v_dual_mov_b32 v10, s2 :: v_dual_mov_b32 v11, s0
+; GFX11-NEXT:    v_mov_b32_e32 v12, s1
 ; GFX11-NEXT:    ds_store_b8 v1, v0
-; GFX11-NEXT:    ds_store_b8 v1, v7 offset:1
+; GFX11-NEXT:    ds_store_b8 v1, v9 offset:1
 ; GFX11-NEXT:    ds_store_b8 v1, v4 offset:2
-; GFX11-NEXT:    ds_store_b8 v1, v8 offset:3
+; GFX11-NEXT:    ds_store_b8 v1, v5 offset:3
 ; GFX11-NEXT:    ds_store_b8 v1, v2 offset:4
-; GFX11-NEXT:    ds_store_b8 v1, v9 offset:5
-; GFX11-NEXT:    ds_store_b8 v1, v5 offset:6
-; GFX11-NEXT:    ds_store_b8 v1, v10 offset:7
+; GFX11-NEXT:    ds_store_b8 v1, v11 offset:5
+; GFX11-NEXT:    ds_store_b8 v1, v6 offset:6
+; GFX11-NEXT:    ds_store_b8 v1, v7 offset:7
 ; GFX11-NEXT:    ds_store_b8 v1, v3 offset:8
-; GFX11-NEXT:    ds_store_b8 v1, v11 offset:9
-; GFX11-NEXT:    ds_store_b8 v1, v6 offset:10
-; GFX11-NEXT:    ds_store_b8 v1, v12 offset:11
+; GFX11-NEXT:    ds_store_b8 v1, v12 offset:9
+; GFX11-NEXT:    ds_store_b8 v1, v8 offset:10
+; GFX11-NEXT:    ds_store_b8 v1, v10 offset:11
 ; GFX11-NEXT:    s_endpgm
   store <3 x i32> %x, ptr addrspace(3) %out, align 1
   ret void
diff --git a/llvm/test/CodeGen/AMDGPU/ds-alignment.ll b/llvm/test/CodeGen/AMDGPU/ds-alignment.ll
index 93422e259b827..4b52d6efb8e98 100644
--- a/llvm/test/CodeGen/AMDGPU/ds-alignment.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds-alignment.ll
@@ -105,14 +105,13 @@ define amdgpu_kernel void @ds4align1(ptr addrspace(3) %in, ptr addrspace(3) %out
 ; ALIGNED-GISEL-LABEL: ds4align1:
 ; ALIGNED-GISEL:       ; %bb.0:
 ; ALIGNED-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, 8
 ; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v0, s0
 ; ALIGNED-GISEL-NEXT:    ds_read_u8 v1, v0
 ; ALIGNED-GISEL-NEXT:    ds_read_u8 v2, v0 offset:1
 ; ALIGNED-GISEL-NEXT:    ds_read_u8 v3, v0 offset:3
 ; ALIGNED-GISEL-NEXT:    ds_read_u8 v0, v0 offset:2
-; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v5, s1
+; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, s1
 ; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(2)
 ; ALIGNED-GISEL-NEXT:    v_lshl_or_b32 v1, v2, 8, v1
 ; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(1)
@@ -121,11 +120,11 @@ define amdgpu_kernel void @ds4align1(ptr addrspace(3) %in, ptr addrspace(3) %out
 ; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; ALIGNED-GISEL-NEXT:    v_or3_b32 v0, v2, v0, v1
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v0
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:1
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v0 offset:2
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:3
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v0
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v2, 24, v0
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1 offset:1
+; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v4, v0 offset:2
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v2 offset:3
 ; ALIGNED-GISEL-NEXT:    s_endpgm
 ;
 ; UNALIGNED-LABEL: ds4align1:
@@ -262,14 +261,13 @@ define amdgpu_kernel void @ds8align1(ptr addrspace(3) %in, ptr addrspace(3) %out
 ; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v3, s1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v2 offset:1
-; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v2, 8
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v4, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v2, 24, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v3, v1 offset:2
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v4 offset:3
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v2 offset:3
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v0 offset:4
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v1 offset:5
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v3, v0 offset:6
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v1 offset:7
 ; ALIGNED-GISEL-NEXT:    s_endpgm
@@ -448,26 +446,25 @@ define amdgpu_kernel void @ds12align1(ptr addrspace(3) %in, ptr addrspace(3) %ou
 ; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v4, 16, v5
 ; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
+; ALIGNED-GISEL-NEXT:    v_or3_b32 v2, v6, v7, v2
 ; ALIGNED-GISEL-NEXT:    v_or3_b32 v0, v0, v4, v3
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v3, 8, v1
 ; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, s1
-; ALIGNED-GISEL-NEXT:    v_or3_b32 v2, v6, v7, v2
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v3 offset:1
-; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v3, 8
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v5, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v3, 24, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v4, v1 offset:2
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v5 offset:3
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v3 offset:3
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v2
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v2 offset:4
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1 offset:5
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v2
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v4, v2 offset:6
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1 offset:7
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v0 offset:8
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1 offset:9
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v4, v0 offset:10
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1 offset:11
 ; ALIGNED-GISEL-NEXT:    s_endpgm
@@ -765,26 +762,25 @@ define amdgpu_kernel void @ds16align1(ptr addrspace(3) %in, ptr addrspace(3) %ou
 ; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v4 offset:1
-; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, 8
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v6, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v4, 24, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v1 offset:2
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v6 offset:3
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v4 offset:3
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v2
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v2 offset:4
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:5
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v2
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v2 offset:6
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:7
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v3
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v3 offset:8
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:9
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v3
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v3 offset:10
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:11
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v0 offset:12
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:13
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v0 offset:14
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:15
 ; ALIGNED-GISEL-NEXT:    s_endpgm

>From c800bef9ffec0eb31b6f133de5d128c2d5663503 Mon Sep 17 00:00:00 2001
From: Yu Li <yu.li at arm.com>
Date: Wed, 27 Aug 2025 09:41:36 +0000
Subject: [PATCH 2/3] whitespace

---
 llvm/lib/Target/AArch64/AArch64Combine.td | 2 --
 1 file changed, 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index e44819ad5a4ae..a64f767ff320d 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -349,8 +349,6 @@ def AArch64PostLegalizerLowering
 }
 
 // Post-legalization combines which are primarily optimizations.
-
-
 def AArch64PostLegalizerCombiner
     : GICombiner<"AArch64PostLegalizerCombinerImpl",
                        [copy_prop, cast_of_cast_combines, 

>From b39b1d6c5670acebc9997f9b050745d7220c92f6 Mon Sep 17 00:00:00 2001
From: Yu Li <yu.li at arm.com>
Date: Wed, 27 Aug 2025 12:14:40 +0000
Subject: [PATCH 3/3] remove wip_match_opcode from GICombineRule

---
 llvm/include/llvm/Target/GlobalISel/Combine.td | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 46e41a5cc4c79..97886594ee751 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -397,10 +397,15 @@ def commute_shift : GICombineRule<
   (apply [{ Helper.applyBuildFn(*${d}, ${matchinfo}); }])>;
 
 // Fold (shift (trunc (shift x, C1)), C2) -> trunc (shift x, (C1 + C2))
+def shift_right_op : GICombinePatFrag<
+  (outs root:$dst), (ins),
+  !foreach(op,
+           [G_LSHR, G_ASHR],
+           (pattern (op $dst, $shifted, $amt)))>;
 def shift_of_trunc_of_shift_matchdata : GIDefMatchData<"ShiftOfTruncOfShift">;
 def shift_of_trunc_of_shift : GICombineRule<
-  (defs root:$root, shift_of_trunc_of_shift_matchdata:$matchinfo),
-  (match (wip_match_opcode G_LSHR, G_ASHR):$root,
+  (defs root:$dst, shift_of_trunc_of_shift_matchdata:$matchinfo),
+  (match (shift_right_op $dst):$root,
          [{ return Helper.matchShiftOfTruncOfShift(*${root}, ${matchinfo}); }]),
   (apply [{ Helper.applyShiftOfTruncOfShift(*${root}, ${matchinfo}); }])>;
 

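For reference, here is a minimal, hypothetical IR example (not part of the patch) of the pattern this combine targets, using the 32-bit to 16-bit case exercised by the AMDGPU tests above: inner shift amount C1 = 16, outer shift amount C2 = 8, folded amount C1 + C2 = 24.

    define i16 @shift_trunc_shift(i32 %x) {
      %inner  = lshr i32 %x, 16            ; inner shift by C1 = 16
      %narrow = trunc i32 %inner to i16    ; narrow to 16 bits
      %outer  = lshr i16 %narrow, 8        ; outer shift by C2 = 8
      ret i16 %outer
    }

With the combine applied, the two shifts collapse into a single shift on the wide type followed by the trunc, i.e. trunc (lshr i32 %x, 24) to i16, which is what the updated checks reflect as a single shift by 24 (e.g. s_lshr_b32 ..., 24 and v_lshrrev_b32_e32 ..., 24) in place of the earlier shift-by-8 of an already-shifted value.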

