[llvm] 619d36f - [GISel] Combine shift + trunc + shift pattern (#155583)

via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 10 07:02:00 PDT 2025


Author: jyli0116
Date: 2025-09-10T15:01:55+01:00
New Revision: 619d36ff4f7552b0e5786f4cf24d93d8b881aa58

URL: https://github.com/llvm/llvm-project/commit/619d36ff4f7552b0e5786f4cf24d93d8b881aa58
DIFF: https://github.com/llvm/llvm-project/commit/619d36ff4f7552b0e5786f4cf24d93d8b881aa58.diff

LOG: [GISel] Combine shift + trunc + shift pattern (#155583)

Folds the shift(trunc(shift(...))) pattern into trunc(shift(...)) by
combining the two shift instructions into a single shift.
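
For example (this mirrors the s32_test1 case in the new lshr-trunc-lshr.ll test;
value names are illustrative), the combine rewrites

  %r   = lshr i64 %a, 32
  %ret = trunc i64 %r to i32
  %x   = lshr i32 %ret, 16

into the equivalent of

  %s = lshr i64 %a, 48
  %x = trunc i64 %s to i32

The two shift amounts are summed. When C1 plus the result width does not equal the
inner shift width, the apply step also emits an AND mask to clear the high bits (the
Mask path in applyLshrOfTruncOfLshr), provided the intermediate values have one use.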

Added: 
    llvm/test/CodeGen/AArch64/lshr-trunc-lshr.ll

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
    llvm/include/llvm/Target/GlobalISel/Combine.td
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
    llvm/lib/Target/AArch64/AArch64Combine.td
    llvm/test/CodeGen/AArch64/rem-by-const.ll
    llvm/test/CodeGen/AArch64/urem-lkk.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll
    llvm/test/CodeGen/AMDGPU/ds-alignment.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 6dba689e8af71..5a1ff3d128bfb 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -81,6 +81,15 @@ struct ShiftOfShiftedLogic {
   uint64_t ValSum;
 };
 
+struct LshrOfTruncOfLshr {
+  bool Mask = false;
+  APInt MaskVal;
+  Register Src;
+  APInt ShiftAmt;
+  LLT ShiftAmtTy;
+  LLT InnerShiftTy;
+};
+
 using BuildFnTy = std::function<void(MachineIRBuilder &)>;
 
 using OperandBuildSteps =
@@ -338,6 +347,12 @@ class CombinerHelper {
 
   bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const;
 
+  /// Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (shift x, (C1 + C2))
+  bool matchLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo,
+                              MachineInstr &ShiftMI) const;
+  void applyLshrOfTruncOfLshr(MachineInstr &MI,
+                              LshrOfTruncOfLshr &MatchInfo) const;
+
   /// Transform a multiply by a power-of-2 value to a left shift.
   bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
   void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 1b7c1235c9805..204e1f6887fa2 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -396,6 +396,16 @@ def commute_shift : GICombineRule<
          [{ return Helper.matchCommuteShift(*${d}, ${matchinfo}); }]),
   (apply [{ Helper.applyBuildFn(*${d}, ${matchinfo}); }])>;
 
+// Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (lshr x, (C1 + C2))
+def lshr_of_trunc_of_lshr_matchdata : GIDefMatchData<"LshrOfTruncOfLshr">;
+def lshr_of_trunc_of_lshr : GICombineRule<
+  (defs root:$root, lshr_of_trunc_of_lshr_matchdata:$matchinfo),
+  (match (G_LSHR $d1, $x, $y):$Shift,
+         (G_TRUNC $d2, $d1),
+         (G_LSHR $dst, $d2, $z):$root,
+         [{ return Helper.matchLshrOfTruncOfLshr(*${root}, ${matchinfo}, *${Shift}); }]),
+  (apply [{ Helper.applyLshrOfTruncOfLshr(*${root}, ${matchinfo}); }])>;
+
 def narrow_binop_feeding_and : GICombineRule<
   (defs root:$root, build_fn_matchinfo:$matchinfo),
   (match (wip_match_opcode G_AND):$root,
@@ -2133,7 +2143,8 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
     fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
     simplify_neg_minmax, combine_concat_vector,
     sext_trunc, zext_trunc, prefer_sign_combines, shuffle_combines,
-    combine_use_vector_truncate, merge_combines, overflow_combines, truncsat_combines]>;
+    combine_use_vector_truncate, merge_combines, overflow_combines, 
+    truncsat_combines, lshr_of_trunc_of_lshr]>;
 
 // A combine group used to for prelegalizer combiners at -O0. The combines in
 // this group have been selected based on experiments to balance code size and

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 0674f5fd1ae06..0ebee2cfd8688 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2094,6 +2094,68 @@ bool CombinerHelper::matchCommuteShift(MachineInstr &MI,
   return true;
 }
 
+bool CombinerHelper::matchLshrOfTruncOfLshr(MachineInstr &MI,
+                                            LshrOfTruncOfLshr &MatchInfo,
+                                            MachineInstr &ShiftMI) const {
+  assert(MI.getOpcode() == TargetOpcode::G_LSHR && "Expected a G_LSHR");
+
+  Register N0 = MI.getOperand(1).getReg();
+  Register N1 = MI.getOperand(2).getReg();
+  unsigned OpSizeInBits = MRI.getType(N0).getScalarSizeInBits();
+
+  APInt N1C, N001C;
+  if (!mi_match(N1, MRI, m_ICstOrSplat(N1C)))
+    return false;
+  auto N001 = ShiftMI.getOperand(2).getReg();
+  if (!mi_match(N001, MRI, m_ICstOrSplat(N001C)))
+    return false;
+
+  if (N001C.getBitWidth() > N1C.getBitWidth())
+    N1C = N1C.zext(N001C.getBitWidth());
+  else
+    N001C = N001C.zext(N1C.getBitWidth());
+
+  Register InnerShift = ShiftMI.getOperand(0).getReg();
+  LLT InnerShiftTy = MRI.getType(InnerShift);
+  uint64_t InnerShiftSize = InnerShiftTy.getScalarSizeInBits();
+  if ((N1C + N001C).ult(InnerShiftSize)) {
+    MatchInfo.Src = ShiftMI.getOperand(1).getReg();
+    MatchInfo.ShiftAmt = N1C + N001C;
+    MatchInfo.ShiftAmtTy = MRI.getType(N001);
+    MatchInfo.InnerShiftTy = InnerShiftTy;
+
+    if ((N001C + OpSizeInBits) == InnerShiftSize)
+      return true;
+    if (MRI.hasOneUse(N0) && MRI.hasOneUse(InnerShift)) {
+      MatchInfo.Mask = true;
+      MatchInfo.MaskVal = APInt(N1C.getBitWidth(), OpSizeInBits) - N1C;
+      return true;
+    }
+  }
+  return false;
+}
+
+void CombinerHelper::applyLshrOfTruncOfLshr(
+    MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo) const {
+  assert(MI.getOpcode() == TargetOpcode::G_LSHR && "Expected a G_LSHR");
+
+  Register Dst = MI.getOperand(0).getReg();
+  auto ShiftAmt =
+      Builder.buildConstant(MatchInfo.ShiftAmtTy, MatchInfo.ShiftAmt);
+  auto Shift =
+      Builder.buildLShr(MatchInfo.InnerShiftTy, MatchInfo.Src, ShiftAmt);
+  if (MatchInfo.Mask == true) {
+    APInt MaskVal =
+        APInt::getLowBitsSet(MatchInfo.InnerShiftTy.getScalarSizeInBits(),
+                             MatchInfo.MaskVal.getZExtValue());
+    auto Mask = Builder.buildConstant(MatchInfo.InnerShiftTy, MaskVal);
+    auto And = Builder.buildAnd(MatchInfo.InnerShiftTy, Shift, Mask);
+    Builder.buildTrunc(Dst, And);
+  } else
+    Builder.buildTrunc(Dst, Shift);
+  MI.eraseFromParent();
+}
+
 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
                                           unsigned &ShiftVal) const {
   assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");

diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index 5f499e5e9700a..076a6235eef0a 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -369,5 +369,5 @@ def AArch64PostLegalizerCombiner
                         commute_constant_to_rhs, extract_vec_elt_combines,
                         push_freeze_to_prevent_poison_from_propagating,
                         combine_mul_cmlt, combine_use_vector_truncate, 
-                        extmultomull, truncsat_combines]> {
+                        extmultomull, truncsat_combines, lshr_of_trunc_of_lshr]> {
 }

diff --git a/llvm/test/CodeGen/AArch64/lshr-trunc-lshr.ll b/llvm/test/CodeGen/AArch64/lshr-trunc-lshr.ll
new file mode 100644
index 0000000000000..8a576fc346bdc
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/lshr-trunc-lshr.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-none-eabi -global-isel=0 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-eabi -global-isel=1 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+define i32 @s32_test1(i64 %a) {
+; CHECK-LABEL: s32_test1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr x0, x0, #48
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %r = lshr i64 %a, 32
+  %ret = trunc i64 %r to i32
+  %x = lshr i32 %ret, 16
+  ret i32 %x
+}
+
+define i32 @s32_test2(i64 %a) {
+; CHECK-LABEL: s32_test2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ubfx x0, x0, #32, #16
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
+  %r = lshr i64 %a, 16
+  %ret = trunc i64 %r to i32
+  %x = lshr i32 %ret, 16
+  ret i32 %x
+}
+
+define <8 x i8> @v8s8_test1(<8 x i16> %a) {
+; CHECK-LABEL: v8s8_test1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushr v0.8h, v0.8h, #12
+; CHECK-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NEXT:    ret
+  %r = lshr <8 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %ret = trunc <8 x i16> %r to <8 x i8>
+  %x = lshr <8 x i8> %ret, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
+  ret <8 x i8> %x
+}
+
+define <8 x i8> @v8s8_test2(<8 x i16> %a) {
+; CHECK-SD-LABEL: v8s8_test2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ushr v0.8h, v0.8h, #8
+; CHECK-SD-NEXT:    bic v0.8h, #240
+; CHECK-SD-NEXT:    xtn v0.8b, v0.8h
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v8s8_test2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.8h, #15
+; CHECK-GI-NEXT:    ushr v0.8h, v0.8h, #8
+; CHECK-GI-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT:    xtn v0.8b, v0.8h
+; CHECK-GI-NEXT:    ret
+  %r = lshr <8 x i16> %a, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
+  %ret = trunc <8 x i16> %r to <8 x i8>
+  %x = lshr <8 x i8> %ret, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
+  ret <8 x i8> %x
+}
+
+define <4 x i16> @v4s16_test1(<4 x i32> %a) {
+; CHECK-LABEL: v4s16_test1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushr v0.4s, v0.4s, #24
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
+  %r = lshr <4 x i32> %a, <i32 16, i32 16, i32 16, i32 16>
+  %ret = trunc <4 x i32> %r to <4 x i16>
+  %x = lshr <4 x i16> %ret, <i16 8, i16 8, i16 8, i16 8>
+  ret <4 x i16> %x
+}
+
+define <4 x i16> @v4s16_test2(<4 x i32> %a) {
+; CHECK-SD-LABEL: v4s16_test2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v4s16_test2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2d, #0x0000ff000000ff
+; CHECK-GI-NEXT:    ushr v0.4s, v0.4s, #16
+; CHECK-GI-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-GI-NEXT:    ret
+  %r = lshr <4 x i32> %a, <i32 8, i32 8, i32 8, i32 8>
+  %ret = trunc <4 x i32> %r to <4 x i16>
+  %x = lshr <4 x i16> %ret, <i16 8, i16 8, i16 8, i16 8>
+  ret <4 x i16> %x
+}
+
+define <2 x i32> @v2s32_test1(<2 x i64> %a) {
+; CHECK-LABEL: v2s32_test1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushr v0.2d, v0.2d, #48
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    ret
+  %r = lshr <2 x i64> %a, <i64 32, i64 32>
+  %ret = trunc <2 x i64> %r to <2 x i32>
+  %x = lshr <2 x i32> %ret, <i32 16, i32 16>
+  ret <2 x i32> %x
+}
+
+define <2 x i32> @v2s32_test2(<2 x i64> %a) {
+; CHECK-SD-LABEL: v2s32_test2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    movi d1, #0x00ffff0000ffff
+; CHECK-SD-NEXT:    shrn v0.2s, v0.2d, #32
+; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: v2s32_test2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2d, #0x0000000000ffff
+; CHECK-GI-NEXT:    ushr v0.2d, v0.2d, #32
+; CHECK-GI-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT:    xtn v0.2s, v0.2d
+; CHECK-GI-NEXT:    ret
+  %r = lshr <2 x i64> %a, <i64 16, i64 16>
+  %ret = trunc <2 x i64> %r to <2 x i32>
+  %x = lshr <2 x i32> %ret, <i32 16, i32 16>
+  ret <2 x i32> %x
+}

diff --git a/llvm/test/CodeGen/AArch64/rem-by-const.ll b/llvm/test/CodeGen/AArch64/rem-by-const.ll
index c57383ad9b1e7..599fa510d4aea 100644
--- a/llvm/test/CodeGen/AArch64/rem-by-const.ll
+++ b/llvm/test/CodeGen/AArch64/rem-by-const.ll
@@ -336,26 +336,15 @@ entry:
 }
 
 define i32 @ui32_100(i32 %a, i32 %b) {
-; CHECK-SD-LABEL: ui32_100:
-; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    mov w8, #34079 // =0x851f
-; CHECK-SD-NEXT:    mov w9, #100 // =0x64
-; CHECK-SD-NEXT:    movk w8, #20971, lsl #16
-; CHECK-SD-NEXT:    umull x8, w0, w8
-; CHECK-SD-NEXT:    lsr x8, x8, #37
-; CHECK-SD-NEXT:    msub w0, w8, w9, w0
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: ui32_100:
-; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    mov w8, #34079 // =0x851f
-; CHECK-GI-NEXT:    mov w9, #100 // =0x64
-; CHECK-GI-NEXT:    movk w8, #20971, lsl #16
-; CHECK-GI-NEXT:    umull x8, w0, w8
-; CHECK-GI-NEXT:    lsr x8, x8, #32
-; CHECK-GI-NEXT:    lsr w8, w8, #5
-; CHECK-GI-NEXT:    msub w0, w8, w9, w0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: ui32_100:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, #34079 // =0x851f
+; CHECK-NEXT:    mov w9, #100 // =0x64
+; CHECK-NEXT:    movk w8, #20971, lsl #16
+; CHECK-NEXT:    umull x8, w0, w8
+; CHECK-NEXT:    lsr x8, x8, #37
+; CHECK-NEXT:    msub w0, w8, w9, w0
+; CHECK-NEXT:    ret
 entry:
   %s = urem i32 %a, 100
   ret i32 %s
@@ -1619,15 +1608,25 @@ entry:
 }
 
 define <8 x i8> @uv8i8_100(<8 x i8> %d, <8 x i8> %e) {
-; CHECK-LABEL: uv8i8_100:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v1.8b, #41
-; CHECK-NEXT:    movi v2.8b, #100
-; CHECK-NEXT:    umull v1.8h, v0.8b, v1.8b
-; CHECK-NEXT:    shrn v1.8b, v1.8h, #8
-; CHECK-NEXT:    ushr v1.8b, v1.8b, #4
-; CHECK-NEXT:    mls v0.8b, v1.8b, v2.8b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: uv8i8_100:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi v1.8b, #41
+; CHECK-SD-NEXT:    movi v2.8b, #100
+; CHECK-SD-NEXT:    umull v1.8h, v0.8b, v1.8b
+; CHECK-SD-NEXT:    shrn v1.8b, v1.8h, #8
+; CHECK-SD-NEXT:    ushr v1.8b, v1.8b, #4
+; CHECK-SD-NEXT:    mls v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uv8i8_100:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    movi v1.8b, #41
+; CHECK-GI-NEXT:    movi v2.8b, #100
+; CHECK-GI-NEXT:    umull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT:    ushr v1.8h, v1.8h, #12
+; CHECK-GI-NEXT:    xtn v1.8b, v1.8h
+; CHECK-GI-NEXT:    mls v0.8b, v1.8b, v2.8b
+; CHECK-GI-NEXT:    ret
 entry:
   %s = urem <8 x i8> %d, <i8 100, i8 100, i8 100, i8 100, i8 100, i8 100, i8 100, i8 100>
   ret <8 x i8> %s
@@ -2301,8 +2300,8 @@ define <4 x i16> @uv4i16_100(<4 x i16> %d, <4 x i16> %e) {
 ; CHECK-GI-NEXT:    ldr d2, [x8, :lo12:.LCPI53_0]
 ; CHECK-GI-NEXT:    umull v1.4s, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    movi v2.4h, #100
-; CHECK-GI-NEXT:    shrn v1.4h, v1.4s, #16
-; CHECK-GI-NEXT:    ushr v1.4h, v1.4h, #1
+; CHECK-GI-NEXT:    ushr v1.4s, v1.4s, #17
+; CHECK-GI-NEXT:    xtn v1.4h, v1.4s
 ; CHECK-GI-NEXT:    mls v0.4h, v1.4h, v2.4h
 ; CHECK-GI-NEXT:    ret
 entry:
@@ -2656,8 +2655,8 @@ define <2 x i32> @uv2i32_100(<2 x i32> %d, <2 x i32> %e) {
 ; CHECK-GI-NEXT:    movi v2.2s, #100
 ; CHECK-GI-NEXT:    ldr d1, [x8, :lo12:.LCPI63_0]
 ; CHECK-GI-NEXT:    umull v1.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT:    shrn v1.2s, v1.2d, #32
-; CHECK-GI-NEXT:    ushr v1.2s, v1.2s, #5
+; CHECK-GI-NEXT:    ushr v1.2d, v1.2d, #37
+; CHECK-GI-NEXT:    xtn v1.2s, v1.2d
 ; CHECK-GI-NEXT:    mls v0.2s, v1.2s, v2.2s
 ; CHECK-GI-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/AArch64/urem-lkk.ll b/llvm/test/CodeGen/AArch64/urem-lkk.ll
index 0dd6685555826..40016c7e4ce0f 100644
--- a/llvm/test/CodeGen/AArch64/urem-lkk.ll
+++ b/llvm/test/CodeGen/AArch64/urem-lkk.ll
@@ -20,26 +20,15 @@ define i32 @fold_urem_positive_odd(i32 %x) {
 }
 
 define i32 @fold_urem_positive_even(i32 %x) {
-; CHECK-SD-LABEL: fold_urem_positive_even:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    mov w8, #16323 // =0x3fc3
-; CHECK-SD-NEXT:    mov w9, #1060 // =0x424
-; CHECK-SD-NEXT:    movk w8, #63310, lsl #16
-; CHECK-SD-NEXT:    umull x8, w0, w8
-; CHECK-SD-NEXT:    lsr x8, x8, #42
-; CHECK-SD-NEXT:    msub w0, w8, w9, w0
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: fold_urem_positive_even:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    mov w8, #16323 // =0x3fc3
-; CHECK-GI-NEXT:    mov w9, #1060 // =0x424
-; CHECK-GI-NEXT:    movk w8, #63310, lsl #16
-; CHECK-GI-NEXT:    umull x8, w0, w8
-; CHECK-GI-NEXT:    lsr x8, x8, #32
-; CHECK-GI-NEXT:    lsr w8, w8, #10
-; CHECK-GI-NEXT:    msub w0, w8, w9, w0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: fold_urem_positive_even:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #16323 // =0x3fc3
+; CHECK-NEXT:    mov w9, #1060 // =0x424
+; CHECK-NEXT:    movk w8, #63310, lsl #16
+; CHECK-NEXT:    umull x8, w0, w8
+; CHECK-NEXT:    lsr x8, x8, #42
+; CHECK-NEXT:    msub w0, w8, w9, w0
+; CHECK-NEXT:    ret
   %1 = urem i32 %x, 1060
   ret i32 %1
 }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
index fd329e230e78b..bd5303213a690 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
@@ -971,100 +971,98 @@ define amdgpu_ps i32 @s_fshl_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg, i32 in
 ;
 ; GFX8-LABEL: s_fshl_v4i8:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_lshr_b32 s6, s1, 8
-; GFX8-NEXT:    s_lshr_b32 s7, s1, 16
-; GFX8-NEXT:    s_lshr_b32 s8, s1, 24
-; GFX8-NEXT:    s_and_b32 s1, s1, 0xff
-; GFX8-NEXT:    s_lshr_b32 s9, s2, 8
-; GFX8-NEXT:    s_lshr_b32 s10, s2, 16
-; GFX8-NEXT:    s_lshr_b32 s11, s2, 24
-; GFX8-NEXT:    s_and_b32 s12, s2, 7
-; GFX8-NEXT:    s_lshr_b32 s1, s1, 1
-; GFX8-NEXT:    s_andn2_b32 s2, 7, s2
+; GFX8-NEXT:    s_and_b32 s11, s2, 7
 ; GFX8-NEXT:    s_lshr_b32 s3, s0, 8
 ; GFX8-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX8-NEXT:    s_lshr_b32 s5, s0, 24
-; GFX8-NEXT:    s_lshl_b32 s0, s0, s12
-; GFX8-NEXT:    s_lshr_b32 s1, s1, s2
-; GFX8-NEXT:    s_or_b32 s0, s0, s1
-; GFX8-NEXT:    s_and_b32 s1, s9, 7
-; GFX8-NEXT:    s_and_b32 s2, s6, 0xff
-; GFX8-NEXT:    s_lshl_b32 s1, s3, s1
-; GFX8-NEXT:    s_lshr_b32 s2, s2, 1
-; GFX8-NEXT:    s_andn2_b32 s3, 7, s9
-; GFX8-NEXT:    s_lshr_b32 s2, s2, s3
-; GFX8-NEXT:    s_or_b32 s1, s1, s2
-; GFX8-NEXT:    s_and_b32 s2, s10, 7
-; GFX8-NEXT:    s_and_b32 s3, s7, 0xff
-; GFX8-NEXT:    s_lshl_b32 s2, s4, s2
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s11
+; GFX8-NEXT:    s_and_b32 s11, s1, 0xff
+; GFX8-NEXT:    s_lshr_b32 s8, s2, 8
+; GFX8-NEXT:    s_lshr_b32 s9, s2, 16
+; GFX8-NEXT:    s_lshr_b32 s10, s2, 24
+; GFX8-NEXT:    s_lshr_b32 s11, s11, 1
+; GFX8-NEXT:    s_andn2_b32 s2, 7, s2
+; GFX8-NEXT:    s_lshr_b32 s2, s11, s2
+; GFX8-NEXT:    s_lshr_b32 s6, s1, 8
+; GFX8-NEXT:    s_or_b32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s2, s8, 7
+; GFX8-NEXT:    s_lshl_b32 s2, s3, s2
+; GFX8-NEXT:    s_and_b32 s3, s6, 0xff
 ; GFX8-NEXT:    s_lshr_b32 s3, s3, 1
-; GFX8-NEXT:    s_andn2_b32 s4, 7, s10
-; GFX8-NEXT:    s_lshr_b32 s3, s3, s4
+; GFX8-NEXT:    s_andn2_b32 s6, 7, s8
+; GFX8-NEXT:    s_lshr_b32 s3, s3, s6
+; GFX8-NEXT:    s_lshr_b32 s7, s1, 16
 ; GFX8-NEXT:    s_or_b32 s2, s2, s3
-; GFX8-NEXT:    s_and_b32 s3, s11, 7
-; GFX8-NEXT:    s_and_b32 s1, s1, 0xff
-; GFX8-NEXT:    s_lshl_b32 s3, s5, s3
-; GFX8-NEXT:    s_lshr_b32 s4, s8, 1
-; GFX8-NEXT:    s_andn2_b32 s5, 7, s11
-; GFX8-NEXT:    s_and_b32 s0, s0, 0xff
-; GFX8-NEXT:    s_lshl_b32 s1, s1, 8
-; GFX8-NEXT:    s_lshr_b32 s4, s4, s5
-; GFX8-NEXT:    s_or_b32 s0, s0, s1
-; GFX8-NEXT:    s_and_b32 s1, s2, 0xff
+; GFX8-NEXT:    s_and_b32 s3, s9, 7
+; GFX8-NEXT:    s_lshl_b32 s3, s4, s3
+; GFX8-NEXT:    s_and_b32 s4, s7, 0xff
+; GFX8-NEXT:    s_lshr_b32 s4, s4, 1
+; GFX8-NEXT:    s_andn2_b32 s6, 7, s9
+; GFX8-NEXT:    s_lshr_b32 s4, s4, s6
 ; GFX8-NEXT:    s_or_b32 s3, s3, s4
-; GFX8-NEXT:    s_lshl_b32 s1, s1, 16
-; GFX8-NEXT:    s_or_b32 s0, s0, s1
-; GFX8-NEXT:    s_and_b32 s1, s3, 0xff
+; GFX8-NEXT:    s_and_b32 s4, s10, 7
+; GFX8-NEXT:    s_lshl_b32 s4, s5, s4
+; GFX8-NEXT:    s_lshr_b32 s1, s1, 25
+; GFX8-NEXT:    s_andn2_b32 s5, 7, s10
+; GFX8-NEXT:    s_and_b32 s2, s2, 0xff
+; GFX8-NEXT:    s_lshr_b32 s1, s1, s5
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xff
+; GFX8-NEXT:    s_lshl_b32 s2, s2, 8
+; GFX8-NEXT:    s_or_b32 s1, s4, s1
+; GFX8-NEXT:    s_or_b32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s2, s3, 0xff
+; GFX8-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX8-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX8-NEXT:    s_or_b32 s0, s0, s2
 ; GFX8-NEXT:    s_lshl_b32 s1, s1, 24
 ; GFX8-NEXT:    s_or_b32 s0, s0, s1
 ; GFX8-NEXT:    ; return to shader part epilog
 ;
 ; GFX9-LABEL: s_fshl_v4i8:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_lshr_b32 s6, s1, 8
-; GFX9-NEXT:    s_lshr_b32 s7, s1, 16
-; GFX9-NEXT:    s_lshr_b32 s8, s1, 24
-; GFX9-NEXT:    s_and_b32 s1, s1, 0xff
-; GFX9-NEXT:    s_lshr_b32 s9, s2, 8
-; GFX9-NEXT:    s_lshr_b32 s10, s2, 16
-; GFX9-NEXT:    s_lshr_b32 s11, s2, 24
-; GFX9-NEXT:    s_and_b32 s12, s2, 7
-; GFX9-NEXT:    s_lshr_b32 s1, s1, 1
-; GFX9-NEXT:    s_andn2_b32 s2, 7, s2
+; GFX9-NEXT:    s_and_b32 s11, s2, 7
 ; GFX9-NEXT:    s_lshr_b32 s3, s0, 8
 ; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX9-NEXT:    s_lshr_b32 s5, s0, 24
-; GFX9-NEXT:    s_lshl_b32 s0, s0, s12
-; GFX9-NEXT:    s_lshr_b32 s1, s1, s2
-; GFX9-NEXT:    s_or_b32 s0, s0, s1
-; GFX9-NEXT:    s_and_b32 s1, s9, 7
-; GFX9-NEXT:    s_and_b32 s2, s6, 0xff
-; GFX9-NEXT:    s_lshl_b32 s1, s3, s1
-; GFX9-NEXT:    s_lshr_b32 s2, s2, 1
-; GFX9-NEXT:    s_andn2_b32 s3, 7, s9
-; GFX9-NEXT:    s_lshr_b32 s2, s2, s3
-; GFX9-NEXT:    s_or_b32 s1, s1, s2
-; GFX9-NEXT:    s_and_b32 s2, s10, 7
-; GFX9-NEXT:    s_and_b32 s3, s7, 0xff
-; GFX9-NEXT:    s_lshl_b32 s2, s4, s2
+; GFX9-NEXT:    s_lshl_b32 s0, s0, s11
+; GFX9-NEXT:    s_and_b32 s11, s1, 0xff
+; GFX9-NEXT:    s_lshr_b32 s8, s2, 8
+; GFX9-NEXT:    s_lshr_b32 s9, s2, 16
+; GFX9-NEXT:    s_lshr_b32 s10, s2, 24
+; GFX9-NEXT:    s_lshr_b32 s11, s11, 1
+; GFX9-NEXT:    s_andn2_b32 s2, 7, s2
+; GFX9-NEXT:    s_lshr_b32 s2, s11, s2
+; GFX9-NEXT:    s_lshr_b32 s6, s1, 8
+; GFX9-NEXT:    s_or_b32 s0, s0, s2
+; GFX9-NEXT:    s_and_b32 s2, s8, 7
+; GFX9-NEXT:    s_lshl_b32 s2, s3, s2
+; GFX9-NEXT:    s_and_b32 s3, s6, 0xff
 ; GFX9-NEXT:    s_lshr_b32 s3, s3, 1
-; GFX9-NEXT:    s_andn2_b32 s4, 7, s10
-; GFX9-NEXT:    s_lshr_b32 s3, s3, s4
+; GFX9-NEXT:    s_andn2_b32 s6, 7, s8
+; GFX9-NEXT:    s_lshr_b32 s3, s3, s6
+; GFX9-NEXT:    s_lshr_b32 s7, s1, 16
 ; GFX9-NEXT:    s_or_b32 s2, s2, s3
-; GFX9-NEXT:    s_and_b32 s3, s11, 7
-; GFX9-NEXT:    s_and_b32 s1, s1, 0xff
-; GFX9-NEXT:    s_lshl_b32 s3, s5, s3
-; GFX9-NEXT:    s_lshr_b32 s4, s8, 1
-; GFX9-NEXT:    s_andn2_b32 s5, 7, s11
-; GFX9-NEXT:    s_and_b32 s0, s0, 0xff
-; GFX9-NEXT:    s_lshl_b32 s1, s1, 8
-; GFX9-NEXT:    s_lshr_b32 s4, s4, s5
-; GFX9-NEXT:    s_or_b32 s0, s0, s1
-; GFX9-NEXT:    s_and_b32 s1, s2, 0xff
+; GFX9-NEXT:    s_and_b32 s3, s9, 7
+; GFX9-NEXT:    s_lshl_b32 s3, s4, s3
+; GFX9-NEXT:    s_and_b32 s4, s7, 0xff
+; GFX9-NEXT:    s_lshr_b32 s4, s4, 1
+; GFX9-NEXT:    s_andn2_b32 s6, 7, s9
+; GFX9-NEXT:    s_lshr_b32 s4, s4, s6
 ; GFX9-NEXT:    s_or_b32 s3, s3, s4
-; GFX9-NEXT:    s_lshl_b32 s1, s1, 16
-; GFX9-NEXT:    s_or_b32 s0, s0, s1
-; GFX9-NEXT:    s_and_b32 s1, s3, 0xff
+; GFX9-NEXT:    s_and_b32 s4, s10, 7
+; GFX9-NEXT:    s_lshl_b32 s4, s5, s4
+; GFX9-NEXT:    s_lshr_b32 s1, s1, 25
+; GFX9-NEXT:    s_andn2_b32 s5, 7, s10
+; GFX9-NEXT:    s_and_b32 s2, s2, 0xff
+; GFX9-NEXT:    s_lshr_b32 s1, s1, s5
+; GFX9-NEXT:    s_and_b32 s0, s0, 0xff
+; GFX9-NEXT:    s_lshl_b32 s2, s2, 8
+; GFX9-NEXT:    s_or_b32 s1, s4, s1
+; GFX9-NEXT:    s_or_b32 s0, s0, s2
+; GFX9-NEXT:    s_and_b32 s2, s3, 0xff
+; GFX9-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX9-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX9-NEXT:    s_or_b32 s0, s0, s2
 ; GFX9-NEXT:    s_lshl_b32 s1, s1, 24
 ; GFX9-NEXT:    s_or_b32 s0, s0, s1
 ; GFX9-NEXT:    ; return to shader part epilog
@@ -1072,100 +1070,98 @@ define amdgpu_ps i32 @s_fshl_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg, i32 in
 ; GFX10-LABEL: s_fshl_v4i8:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_lshr_b32 s6, s1, 8
-; GFX10-NEXT:    s_lshr_b32 s7, s1, 16
-; GFX10-NEXT:    s_lshr_b32 s8, s1, 24
-; GFX10-NEXT:    s_and_b32 s1, s1, 0xff
-; GFX10-NEXT:    s_lshr_b32 s9, s2, 8
-; GFX10-NEXT:    s_lshr_b32 s10, s2, 16
-; GFX10-NEXT:    s_lshr_b32 s11, s2, 24
+; GFX10-NEXT:    s_and_b32 s11, s1, 0xff
+; GFX10-NEXT:    s_lshr_b32 s8, s2, 8
+; GFX10-NEXT:    s_lshr_b32 s9, s2, 16
+; GFX10-NEXT:    s_lshr_b32 s10, s2, 24
 ; GFX10-NEXT:    s_and_b32 s12, s2, 7
-; GFX10-NEXT:    s_lshr_b32 s1, s1, 1
+; GFX10-NEXT:    s_lshr_b32 s11, s11, 1
 ; GFX10-NEXT:    s_andn2_b32 s2, 7, s2
+; GFX10-NEXT:    s_and_b32 s6, s6, 0xff
 ; GFX10-NEXT:    s_lshr_b32 s3, s0, 8
-; GFX10-NEXT:    s_lshr_b32 s1, s1, s2
-; GFX10-NEXT:    s_and_b32 s2, s6, 0xff
-; GFX10-NEXT:    s_and_b32 s6, s9, 7
-; GFX10-NEXT:    s_lshr_b32 s2, s2, 1
-; GFX10-NEXT:    s_andn2_b32 s9, 7, s9
+; GFX10-NEXT:    s_lshr_b32 s2, s11, s2
+; GFX10-NEXT:    s_and_b32 s11, s8, 7
+; GFX10-NEXT:    s_lshr_b32 s6, s6, 1
+; GFX10-NEXT:    s_andn2_b32 s8, 7, s8
 ; GFX10-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX10-NEXT:    s_lshr_b32 s5, s0, 24
+; GFX10-NEXT:    s_lshr_b32 s7, s1, 16
 ; GFX10-NEXT:    s_lshl_b32 s0, s0, s12
-; GFX10-NEXT:    s_lshl_b32 s3, s3, s6
-; GFX10-NEXT:    s_lshr_b32 s2, s2, s9
-; GFX10-NEXT:    s_or_b32 s0, s0, s1
-; GFX10-NEXT:    s_or_b32 s1, s3, s2
-; GFX10-NEXT:    s_and_b32 s2, s7, 0xff
-; GFX10-NEXT:    s_and_b32 s3, s10, 7
-; GFX10-NEXT:    s_lshr_b32 s2, s2, 1
-; GFX10-NEXT:    s_andn2_b32 s6, 7, s10
-; GFX10-NEXT:    s_lshl_b32 s3, s4, s3
-; GFX10-NEXT:    s_lshr_b32 s2, s2, s6
-; GFX10-NEXT:    s_and_b32 s4, s11, 7
-; GFX10-NEXT:    s_lshr_b32 s6, s8, 1
-; GFX10-NEXT:    s_andn2_b32 s7, 7, s11
-; GFX10-NEXT:    s_lshl_b32 s4, s5, s4
-; GFX10-NEXT:    s_lshr_b32 s5, s6, s7
-; GFX10-NEXT:    s_or_b32 s2, s3, s2
-; GFX10-NEXT:    s_and_b32 s1, s1, 0xff
-; GFX10-NEXT:    s_or_b32 s3, s4, s5
-; GFX10-NEXT:    s_and_b32 s0, s0, 0xff
-; GFX10-NEXT:    s_lshl_b32 s1, s1, 8
+; GFX10-NEXT:    s_lshl_b32 s3, s3, s11
+; GFX10-NEXT:    s_lshr_b32 s6, s6, s8
+; GFX10-NEXT:    s_or_b32 s0, s0, s2
+; GFX10-NEXT:    s_or_b32 s2, s3, s6
+; GFX10-NEXT:    s_and_b32 s3, s7, 0xff
+; GFX10-NEXT:    s_and_b32 s6, s9, 7
+; GFX10-NEXT:    s_lshr_b32 s3, s3, 1
+; GFX10-NEXT:    s_andn2_b32 s7, 7, s9
+; GFX10-NEXT:    s_lshl_b32 s4, s4, s6
+; GFX10-NEXT:    s_lshr_b32 s3, s3, s7
+; GFX10-NEXT:    s_and_b32 s6, s10, 7
+; GFX10-NEXT:    s_lshr_b32 s1, s1, 25
+; GFX10-NEXT:    s_andn2_b32 s7, 7, s10
+; GFX10-NEXT:    s_lshl_b32 s5, s5, s6
+; GFX10-NEXT:    s_lshr_b32 s1, s1, s7
+; GFX10-NEXT:    s_or_b32 s3, s4, s3
 ; GFX10-NEXT:    s_and_b32 s2, s2, 0xff
-; GFX10-NEXT:    s_or_b32 s0, s0, s1
-; GFX10-NEXT:    s_lshl_b32 s1, s2, 16
-; GFX10-NEXT:    s_and_b32 s2, s3, 0xff
-; GFX10-NEXT:    s_or_b32 s0, s0, s1
-; GFX10-NEXT:    s_lshl_b32 s1, s2, 24
+; GFX10-NEXT:    s_or_b32 s1, s5, s1
+; GFX10-NEXT:    s_and_b32 s0, s0, 0xff
+; GFX10-NEXT:    s_lshl_b32 s2, s2, 8
+; GFX10-NEXT:    s_and_b32 s3, s3, 0xff
+; GFX10-NEXT:    s_or_b32 s0, s0, s2
+; GFX10-NEXT:    s_lshl_b32 s2, s3, 16
+; GFX10-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX10-NEXT:    s_or_b32 s0, s0, s2
+; GFX10-NEXT:    s_lshl_b32 s1, s1, 24
 ; GFX10-NEXT:    s_or_b32 s0, s0, s1
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: s_fshl_v4i8:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_lshr_b32 s6, s1, 8
-; GFX11-NEXT:    s_lshr_b32 s7, s1, 16
-; GFX11-NEXT:    s_lshr_b32 s8, s1, 24
-; GFX11-NEXT:    s_and_b32 s1, s1, 0xff
-; GFX11-NEXT:    s_lshr_b32 s9, s2, 8
-; GFX11-NEXT:    s_lshr_b32 s10, s2, 16
-; GFX11-NEXT:    s_lshr_b32 s11, s2, 24
+; GFX11-NEXT:    s_and_b32 s11, s1, 0xff
+; GFX11-NEXT:    s_lshr_b32 s8, s2, 8
+; GFX11-NEXT:    s_lshr_b32 s9, s2, 16
+; GFX11-NEXT:    s_lshr_b32 s10, s2, 24
 ; GFX11-NEXT:    s_and_b32 s12, s2, 7
-; GFX11-NEXT:    s_lshr_b32 s1, s1, 1
+; GFX11-NEXT:    s_lshr_b32 s11, s11, 1
 ; GFX11-NEXT:    s_and_not1_b32 s2, 7, s2
+; GFX11-NEXT:    s_and_b32 s6, s6, 0xff
 ; GFX11-NEXT:    s_lshr_b32 s3, s0, 8
-; GFX11-NEXT:    s_lshr_b32 s1, s1, s2
-; GFX11-NEXT:    s_and_b32 s2, s6, 0xff
-; GFX11-NEXT:    s_and_b32 s6, s9, 7
-; GFX11-NEXT:    s_lshr_b32 s2, s2, 1
-; GFX11-NEXT:    s_and_not1_b32 s9, 7, s9
+; GFX11-NEXT:    s_lshr_b32 s2, s11, s2
+; GFX11-NEXT:    s_and_b32 s11, s8, 7
+; GFX11-NEXT:    s_lshr_b32 s6, s6, 1
+; GFX11-NEXT:    s_and_not1_b32 s8, 7, s8
 ; GFX11-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX11-NEXT:    s_lshr_b32 s5, s0, 24
+; GFX11-NEXT:    s_lshr_b32 s7, s1, 16
 ; GFX11-NEXT:    s_lshl_b32 s0, s0, s12
-; GFX11-NEXT:    s_lshl_b32 s3, s3, s6
-; GFX11-NEXT:    s_lshr_b32 s2, s2, s9
-; GFX11-NEXT:    s_or_b32 s0, s0, s1
-; GFX11-NEXT:    s_or_b32 s1, s3, s2
-; GFX11-NEXT:    s_and_b32 s2, s7, 0xff
-; GFX11-NEXT:    s_and_b32 s3, s10, 7
-; GFX11-NEXT:    s_lshr_b32 s2, s2, 1
-; GFX11-NEXT:    s_and_not1_b32 s6, 7, s10
-; GFX11-NEXT:    s_lshl_b32 s3, s4, s3
-; GFX11-NEXT:    s_lshr_b32 s2, s2, s6
-; GFX11-NEXT:    s_and_b32 s4, s11, 7
-; GFX11-NEXT:    s_lshr_b32 s6, s8, 1
-; GFX11-NEXT:    s_and_not1_b32 s7, 7, s11
-; GFX11-NEXT:    s_lshl_b32 s4, s5, s4
-; GFX11-NEXT:    s_lshr_b32 s5, s6, s7
-; GFX11-NEXT:    s_or_b32 s2, s3, s2
-; GFX11-NEXT:    s_and_b32 s1, s1, 0xff
-; GFX11-NEXT:    s_or_b32 s3, s4, s5
-; GFX11-NEXT:    s_and_b32 s0, s0, 0xff
-; GFX11-NEXT:    s_lshl_b32 s1, s1, 8
+; GFX11-NEXT:    s_lshl_b32 s3, s3, s11
+; GFX11-NEXT:    s_lshr_b32 s6, s6, s8
+; GFX11-NEXT:    s_or_b32 s0, s0, s2
+; GFX11-NEXT:    s_or_b32 s2, s3, s6
+; GFX11-NEXT:    s_and_b32 s3, s7, 0xff
+; GFX11-NEXT:    s_and_b32 s6, s9, 7
+; GFX11-NEXT:    s_lshr_b32 s3, s3, 1
+; GFX11-NEXT:    s_and_not1_b32 s7, 7, s9
+; GFX11-NEXT:    s_lshl_b32 s4, s4, s6
+; GFX11-NEXT:    s_lshr_b32 s3, s3, s7
+; GFX11-NEXT:    s_and_b32 s6, s10, 7
+; GFX11-NEXT:    s_lshr_b32 s1, s1, 25
+; GFX11-NEXT:    s_and_not1_b32 s7, 7, s10
+; GFX11-NEXT:    s_lshl_b32 s5, s5, s6
+; GFX11-NEXT:    s_lshr_b32 s1, s1, s7
+; GFX11-NEXT:    s_or_b32 s3, s4, s3
 ; GFX11-NEXT:    s_and_b32 s2, s2, 0xff
-; GFX11-NEXT:    s_or_b32 s0, s0, s1
-; GFX11-NEXT:    s_lshl_b32 s1, s2, 16
-; GFX11-NEXT:    s_and_b32 s2, s3, 0xff
-; GFX11-NEXT:    s_or_b32 s0, s0, s1
-; GFX11-NEXT:    s_lshl_b32 s1, s2, 24
+; GFX11-NEXT:    s_or_b32 s1, s5, s1
+; GFX11-NEXT:    s_and_b32 s0, s0, 0xff
+; GFX11-NEXT:    s_lshl_b32 s2, s2, 8
+; GFX11-NEXT:    s_and_b32 s3, s3, 0xff
+; GFX11-NEXT:    s_or_b32 s0, s0, s2
+; GFX11-NEXT:    s_lshl_b32 s2, s3, 16
+; GFX11-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX11-NEXT:    s_or_b32 s0, s0, s2
+; GFX11-NEXT:    s_lshl_b32 s1, s1, 24
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_or_b32 s0, s0, s1
 ; GFX11-NEXT:    ; return to shader part epilog
@@ -1248,18 +1244,18 @@ define i32 @v_fshl_v4i8(i32 %lhs.arg, i32 %rhs.arg, i32 %amt.arg) {
 ; GFX8-NEXT:    v_mov_b32_e32 v7, 0xff
 ; GFX8-NEXT:    v_or_b32_e32 v3, v3, v4
 ; GFX8-NEXT:    v_mov_b32_e32 v4, 7
-; GFX8-NEXT:    v_mov_b32_e32 v9, -1
+; GFX8-NEXT:    v_mov_b32_e32 v8, -1
 ; GFX8-NEXT:    v_and_b32_sdwa v5, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_and_b32_sdwa v7, v1, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX8-NEXT:    v_xor_b32_sdwa v10, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_xor_b32_sdwa v9, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_and_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
-; GFX8-NEXT:    v_xor_b32_sdwa v2, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+; GFX8-NEXT:    v_xor_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v7, 1, v7
-; GFX8-NEXT:    v_and_b32_e32 v10, 7, v10
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT:    v_and_b32_e32 v9, 7, v9
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 25, v1
 ; GFX8-NEXT:    v_and_b32_e32 v2, 7, v2
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v5, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_lshrrev_b16_e32 v7, v10, v7
+; GFX8-NEXT:    v_lshrrev_b16_e32 v7, v9, v7
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v1, v2, v1
 ; GFX8-NEXT:    v_or_b32_e32 v5, v5, v7
@@ -1298,21 +1294,21 @@ define i32 @v_fshl_v4i8(i32 %lhs.arg, i32 %rhs.arg, i32 %amt.arg) {
 ; GFX9-NEXT:    v_mov_b32_e32 v7, 0xff
 ; GFX9-NEXT:    v_or_b32_e32 v3, v3, v4
 ; GFX9-NEXT:    v_mov_b32_e32 v4, 7
-; GFX9-NEXT:    v_mov_b32_e32 v10, -1
+; GFX9-NEXT:    v_mov_b32_e32 v9, -1
 ; GFX9-NEXT:    v_and_b32_sdwa v5, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT:    v_and_b32_sdwa v9, v1, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT:    v_xor_b32_sdwa v11, v2, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_and_b32_sdwa v8, v1, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_xor_b32_sdwa v10, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX9-NEXT:    v_and_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
-; GFX9-NEXT:    v_xor_b32_sdwa v2, v2, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
-; GFX9-NEXT:    v_lshrrev_b16_e32 v9, 1, v9
-; GFX9-NEXT:    v_and_b32_e32 v11, 7, v11
-; GFX9-NEXT:    v_lshrrev_b16_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX9-NEXT:    v_xor_b32_sdwa v2, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+; GFX9-NEXT:    v_lshrrev_b16_e32 v8, 1, v8
+; GFX9-NEXT:    v_and_b32_e32 v10, 7, v10
+; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 25, v1
 ; GFX9-NEXT:    v_and_b32_e32 v2, 7, v2
 ; GFX9-NEXT:    v_lshlrev_b16_sdwa v5, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX9-NEXT:    v_lshrrev_b16_e32 v9, v11, v9
+; GFX9-NEXT:    v_lshrrev_b16_e32 v8, v10, v8
 ; GFX9-NEXT:    v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
 ; GFX9-NEXT:    v_lshrrev_b16_e32 v1, v2, v1
-; GFX9-NEXT:    v_or_b32_e32 v5, v5, v9
+; GFX9-NEXT:    v_or_b32_e32 v5, v5, v8
 ; GFX9-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 8
 ; GFX9-NEXT:    v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -1327,111 +1323,109 @@ define i32 @v_fshl_v4i8(i32 %lhs.arg, i32 %rhs.arg, i32 %amt.arg) {
 ; GFX10-LABEL: v_fshl_v4i8:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_lshrrev_b32_e32 v6, 8, v2
+; GFX10-NEXT:    v_lshrrev_b32_e32 v5, 8, v2
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v3, 8, v0
-; GFX10-NEXT:    v_and_b32_e32 v8, 7, v2
-; GFX10-NEXT:    v_and_b32_e32 v9, 0xff, v1
-; GFX10-NEXT:    v_xor_b32_e32 v10, -1, v2
-; GFX10-NEXT:    v_and_b32_e32 v11, 7, v6
+; GFX10-NEXT:    v_and_b32_e32 v7, 7, v2
+; GFX10-NEXT:    v_and_b32_e32 v8, 0xff, v1
+; GFX10-NEXT:    v_xor_b32_e32 v9, -1, v2
+; GFX10-NEXT:    v_and_b32_e32 v10, 7, v5
+; GFX10-NEXT:    v_lshrrev_b32_e32 v11, 8, v1
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v4, 16, v0
-; GFX10-NEXT:    v_lshrrev_b32_e32 v5, 24, v0
-; GFX10-NEXT:    v_lshrrev_b32_e32 v7, 8, v1
-; GFX10-NEXT:    v_lshlrev_b16 v0, v8, v0
-; GFX10-NEXT:    v_lshrrev_b16 v8, 1, v9
-; GFX10-NEXT:    v_and_b32_e32 v9, 7, v10
-; GFX10-NEXT:    v_lshlrev_b16 v3, v11, v3
+; GFX10-NEXT:    v_lshrrev_b32_e32 v6, 24, v0
+; GFX10-NEXT:    v_lshlrev_b16 v0, v7, v0
+; GFX10-NEXT:    v_lshrrev_b16 v7, 1, v8
+; GFX10-NEXT:    v_and_b32_e32 v8, 7, v9
+; GFX10-NEXT:    v_lshlrev_b16 v3, v10, v3
+; GFX10-NEXT:    v_and_b32_e32 v9, 0xff, v11
 ; GFX10-NEXT:    v_mov_b32_e32 v10, 0xff
 ; GFX10-NEXT:    v_mov_b32_e32 v11, -1
-; GFX10-NEXT:    v_lshrrev_b32_e32 v12, 24, v1
-; GFX10-NEXT:    v_and_b32_e32 v7, 0xff, v7
-; GFX10-NEXT:    v_xor_b32_e32 v6, -1, v6
-; GFX10-NEXT:    v_mov_b32_e32 v13, 7
-; GFX10-NEXT:    v_and_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX10-NEXT:    v_xor_b32_sdwa v10, v2, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT:    v_xor_b32_e32 v5, -1, v5
+; GFX10-NEXT:    v_mov_b32_e32 v12, 7
+; GFX10-NEXT:    v_lshrrev_b16 v9, 1, v9
+; GFX10-NEXT:    v_and_b32_sdwa v10, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT:    v_xor_b32_sdwa v13, v2, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX10-NEXT:    v_xor_b32_sdwa v11, v2, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
-; GFX10-NEXT:    v_lshrrev_b16 v7, 1, v7
-; GFX10-NEXT:    v_and_b32_e32 v6, 7, v6
-; GFX10-NEXT:    v_and_b32_sdwa v14, v2, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX10-NEXT:    v_lshrrev_b16 v1, 1, v1
-; GFX10-NEXT:    v_and_b32_e32 v10, 7, v10
-; GFX10-NEXT:    v_and_b32_sdwa v2, v2, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
-; GFX10-NEXT:    v_lshrrev_b16 v12, 1, v12
+; GFX10-NEXT:    v_and_b32_e32 v5, 7, v5
+; GFX10-NEXT:    v_and_b32_sdwa v14, v2, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT:    v_lshrrev_b16 v10, 1, v10
+; GFX10-NEXT:    v_and_b32_e32 v13, 7, v13
+; GFX10-NEXT:    v_and_b32_sdwa v2, v2, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
+; GFX10-NEXT:    v_lshrrev_b32_e32 v1, 25, v1
 ; GFX10-NEXT:    v_and_b32_e32 v11, 7, v11
-; GFX10-NEXT:    v_lshrrev_b16 v6, v6, v7
+; GFX10-NEXT:    v_lshrrev_b16 v5, v5, v9
 ; GFX10-NEXT:    v_lshlrev_b16 v4, v14, v4
-; GFX10-NEXT:    v_lshrrev_b16 v1, v10, v1
-; GFX10-NEXT:    v_lshlrev_b16 v2, v2, v5
-; GFX10-NEXT:    v_lshrrev_b16 v5, v11, v12
-; GFX10-NEXT:    v_lshrrev_b16 v7, v9, v8
-; GFX10-NEXT:    v_or_b32_e32 v3, v3, v6
-; GFX10-NEXT:    v_mov_b32_e32 v6, 8
-; GFX10-NEXT:    v_or_b32_e32 v1, v4, v1
-; GFX10-NEXT:    v_or_b32_e32 v2, v2, v5
-; GFX10-NEXT:    v_or_b32_e32 v0, v0, v7
-; GFX10-NEXT:    v_lshlrev_b32_sdwa v3, v6, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT:    v_lshrrev_b16 v9, v13, v10
+; GFX10-NEXT:    v_lshlrev_b16 v2, v2, v6
+; GFX10-NEXT:    v_lshrrev_b16 v1, v11, v1
+; GFX10-NEXT:    v_lshrrev_b16 v6, v8, v7
+; GFX10-NEXT:    v_or_b32_e32 v3, v3, v5
+; GFX10-NEXT:    v_mov_b32_e32 v5, 8
+; GFX10-NEXT:    v_or_b32_e32 v4, v4, v9
+; GFX10-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX10-NEXT:    v_or_b32_e32 v0, v0, v6
+; GFX10-NEXT:    v_lshlrev_b32_sdwa v2, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX10-NEXT:    v_and_b32_e32 v3, 0xff, v4
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xff, v1
-; GFX10-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GFX10-NEXT:    v_and_or_b32 v0, 0xff, v0, v3
-; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX10-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
-; GFX10-NEXT:    v_or3_b32 v0, v0, v1, v2
+; GFX10-NEXT:    v_and_or_b32 v0, 0xff, v0, v2
+; GFX10-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
+; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
+; GFX10-NEXT:    v_or3_b32 v0, v0, v2, v1
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_fshl_v4i8:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v6, 8, v1
-; GFX11-NEXT:    v_lshrrev_b32_e32 v9, 8, v2
+; GFX11-NEXT:    v_lshrrev_b32_e32 v8, 8, v2
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v3, 8, v0
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v7, 16, v1
-; GFX11-NEXT:    v_lshrrev_b32_e32 v10, 16, v2
+; GFX11-NEXT:    v_lshrrev_b32_e32 v9, 16, v2
 ; GFX11-NEXT:    v_and_b32_e32 v6, 0xff, v6
-; GFX11-NEXT:    v_xor_b32_e32 v13, -1, v9
-; GFX11-NEXT:    v_lshrrev_b32_e32 v11, 24, v2
-; GFX11-NEXT:    v_and_b32_e32 v9, 7, v9
-; GFX11-NEXT:    v_lshrrev_b32_e32 v8, 24, v1
+; GFX11-NEXT:    v_xor_b32_e32 v13, -1, v8
+; GFX11-NEXT:    v_lshrrev_b32_e32 v10, 24, v2
+; GFX11-NEXT:    v_and_b32_e32 v8, 7, v8
+; GFX11-NEXT:    v_and_b32_e32 v7, 0xff, v7
 ; GFX11-NEXT:    v_lshrrev_b16 v6, 1, v6
 ; GFX11-NEXT:    v_and_b32_e32 v13, 7, v13
-; GFX11-NEXT:    v_and_b32_e32 v7, 0xff, v7
-; GFX11-NEXT:    v_lshlrev_b16 v3, v9, v3
-; GFX11-NEXT:    v_xor_b32_e32 v9, -1, v10
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v4, 16, v0
-; GFX11-NEXT:    v_lshrrev_b16 v6, v13, v6
-; GFX11-NEXT:    v_xor_b32_e32 v13, -1, v11
+; GFX11-NEXT:    v_lshlrev_b16 v3, v8, v3
+; GFX11-NEXT:    v_xor_b32_e32 v8, -1, v9
 ; GFX11-NEXT:    v_lshrrev_b32_e32 v5, 24, v0
-; GFX11-NEXT:    v_and_b32_e32 v12, 7, v2
-; GFX11-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX11-NEXT:    v_lshrrev_b16 v6, v13, v6
+; GFX11-NEXT:    v_xor_b32_e32 v13, -1, v10
+; GFX11-NEXT:    v_and_b32_e32 v11, 7, v2
+; GFX11-NEXT:    v_and_b32_e32 v12, 0xff, v1
 ; GFX11-NEXT:    v_xor_b32_e32 v2, -1, v2
-; GFX11-NEXT:    v_and_b32_e32 v10, 7, v10
-; GFX11-NEXT:    v_lshrrev_b16 v7, 1, v7
 ; GFX11-NEXT:    v_and_b32_e32 v9, 7, v9
-; GFX11-NEXT:    v_and_b32_e32 v11, 7, v11
-; GFX11-NEXT:    v_lshrrev_b16 v8, 1, v8
+; GFX11-NEXT:    v_lshrrev_b16 v7, 1, v7
+; GFX11-NEXT:    v_and_b32_e32 v8, 7, v8
+; GFX11-NEXT:    v_and_b32_e32 v10, 7, v10
+; GFX11-NEXT:    v_lshrrev_b32_e32 v1, 25, v1
 ; GFX11-NEXT:    v_and_b32_e32 v13, 7, v13
-; GFX11-NEXT:    v_lshrrev_b16 v1, 1, v1
+; GFX11-NEXT:    v_lshrrev_b16 v12, 1, v12
 ; GFX11-NEXT:    v_and_b32_e32 v2, 7, v2
 ; GFX11-NEXT:    v_or_b32_e32 v3, v3, v6
-; GFX11-NEXT:    v_lshlrev_b16 v4, v10, v4
-; GFX11-NEXT:    v_lshrrev_b16 v6, v9, v7
-; GFX11-NEXT:    v_lshlrev_b16 v5, v11, v5
-; GFX11-NEXT:    v_lshrrev_b16 v7, v13, v8
-; GFX11-NEXT:    v_lshlrev_b16 v0, v12, v0
-; GFX11-NEXT:    v_lshrrev_b16 v1, v2, v1
-; GFX11-NEXT:    v_and_b32_e32 v2, 0xff, v3
-; GFX11-NEXT:    v_or_b32_e32 v3, v4, v6
-; GFX11-NEXT:    v_or_b32_e32 v4, v5, v7
+; GFX11-NEXT:    v_lshlrev_b16 v4, v9, v4
+; GFX11-NEXT:    v_lshrrev_b16 v6, v8, v7
+; GFX11-NEXT:    v_lshlrev_b16 v5, v10, v5
+; GFX11-NEXT:    v_lshrrev_b16 v1, v13, v1
+; GFX11-NEXT:    v_lshlrev_b16 v0, v11, v0
+; GFX11-NEXT:    v_lshrrev_b16 v2, v2, v12
+; GFX11-NEXT:    v_and_b32_e32 v3, 0xff, v3
+; GFX11-NEXT:    v_or_b32_e32 v4, v4, v6
+; GFX11-NEXT:    v_or_b32_e32 v1, v5, v1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_or_b32_e32 v0, v0, v1
-; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 8, v2
+; GFX11-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT:    v_lshlrev_b32_e32 v2, 8, v3
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_and_b32_e32 v2, 0xff, v3
 ; GFX11-NEXT:    v_and_b32_e32 v3, 0xff, v4
+; GFX11-NEXT:    v_and_b32_e32 v1, 0xff, v1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_and_or_b32 v0, 0xff, v0, v1
-; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-NEXT:    v_and_or_b32 v0, 0xff, v0, v2
+; GFX11-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_lshlrev_b32_e32 v2, 24, v3
-; GFX11-NEXT:    v_or3_b32 v0, v0, v1, v2
+; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 24, v1
+; GFX11-NEXT:    v_or3_b32 v0, v0, v2, v1
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %lhs = bitcast i32 %lhs.arg to <4 x i8>
   %rhs = bitcast i32 %rhs.arg to <4 x i8>
@@ -3679,22 +3673,21 @@ define amdgpu_ps i32 @s_fshl_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <
 ;
 ; GFX8-LABEL: s_fshl_v2i16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_lshr_b32 s4, s1, 16
-; GFX8-NEXT:    s_and_b32 s1, 0xffff, s1
-; GFX8-NEXT:    s_lshr_b32 s5, s2, 16
-; GFX8-NEXT:    s_and_b32 s6, s2, 15
-; GFX8-NEXT:    s_andn2_b32 s2, 15, s2
-; GFX8-NEXT:    s_lshr_b32 s1, s1, 1
+; GFX8-NEXT:    s_and_b32 s5, s2, 15
 ; GFX8-NEXT:    s_lshr_b32 s3, s0, 16
-; GFX8-NEXT:    s_lshl_b32 s0, s0, s6
-; GFX8-NEXT:    s_lshr_b32 s1, s1, s2
-; GFX8-NEXT:    s_or_b32 s0, s0, s1
-; GFX8-NEXT:    s_and_b32 s1, s5, 15
-; GFX8-NEXT:    s_andn2_b32 s2, 15, s5
-; GFX8-NEXT:    s_lshl_b32 s1, s3, s1
-; GFX8-NEXT:    s_lshr_b32 s3, s4, 1
-; GFX8-NEXT:    s_lshr_b32 s2, s3, s2
-; GFX8-NEXT:    s_or_b32 s1, s1, s2
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s5
+; GFX8-NEXT:    s_and_b32 s5, 0xffff, s1
+; GFX8-NEXT:    s_lshr_b32 s4, s2, 16
+; GFX8-NEXT:    s_andn2_b32 s2, 15, s2
+; GFX8-NEXT:    s_lshr_b32 s5, s5, 1
+; GFX8-NEXT:    s_lshr_b32 s2, s5, s2
+; GFX8-NEXT:    s_or_b32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s2, s4, 15
+; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
+; GFX8-NEXT:    s_lshr_b32 s1, s1, 17
+; GFX8-NEXT:    s_lshl_b32 s2, s3, s2
+; GFX8-NEXT:    s_lshr_b32 s1, s1, s4
+; GFX8-NEXT:    s_or_b32 s1, s2, s1
 ; GFX8-NEXT:    s_and_b32 s1, 0xffff, s1
 ; GFX8-NEXT:    s_and_b32 s0, 0xffff, s0
 ; GFX8-NEXT:    s_lshl_b32 s1, s1, 16
@@ -3806,13 +3799,12 @@ define <2 x i16> @v_fshl_v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt) {
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v4, v5
 ; GFX8-NEXT:    v_or_b32_e32 v3, v3, v4
 ; GFX8-NEXT:    v_mov_b32_e32 v4, 15
-; GFX8-NEXT:    v_and_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_mov_b32_e32 v5, -1
+; GFX8-NEXT:    v_and_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_mov_b32_e32 v4, 1
 ; GFX8-NEXT:    v_and_b32_e32 v2, 15, v2
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v1, 17, v1
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v1, v2, v1
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff, v0
@@ -3879,14 +3871,12 @@ define <2 x i16> @v_fshl_v2i16_4_8(<2 x i16> %lhs, <2 x i16> %rhs) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 4, v0
-; GFX8-NEXT:    v_lshrrev_b16_e32 v3, 12, v1
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v3
-; GFX8-NEXT:    v_mov_b32_e32 v3, 8
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v2, 8, v2
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v3, 12, v1
+; GFX8-NEXT:    v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 4, v0
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v3
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -3957,11 +3947,10 @@ define amdgpu_ps float @v_fshl_v2i16_ssv(<2 x i16> inreg %lhs, <2 x i16> inreg %
 ; GFX8-NEXT:    v_or_b32_e32 v1, v1, v2
 ; GFX8-NEXT:    v_mov_b32_e32 v2, 15
 ; GFX8-NEXT:    v_mov_b32_e32 v3, -1
-; GFX8-NEXT:    s_lshr_b32 s3, s1, 16
 ; GFX8-NEXT:    v_and_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_and_b32_e32 v0, 15, v0
-; GFX8-NEXT:    s_lshr_b32 s0, s3, 1
+; GFX8-NEXT:    s_lshr_b32 s0, s1, 17
 ; GFX8-NEXT:    v_lshlrev_b16_e64 v2, v2, s2
 ; GFX8-NEXT:    v_lshrrev_b16_e64 v0, v0, s0
 ; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
@@ -4051,11 +4040,10 @@ define amdgpu_ps float @v_fshl_v2i16_svs(<2 x i16> inreg %lhs, <2 x i16> %rhs, <
 ; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
 ; GFX8-NEXT:    s_lshl_b32 s0, s0, s4
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v1, s1, v1
-; GFX8-NEXT:    v_mov_b32_e32 v2, 1
 ; GFX8-NEXT:    v_or_b32_e32 v1, s0, v1
 ; GFX8-NEXT:    s_and_b32 s0, s3, 15
 ; GFX8-NEXT:    s_andn2_b32 s1, 15, s3
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v0, 17, v0
 ; GFX8-NEXT:    s_lshl_b32 s0, s2, s0
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v0, s1, v0
 ; GFX8-NEXT:    v_or_b32_e32 v0, s0, v0
@@ -4135,21 +4123,20 @@ define amdgpu_ps float @v_fshl_v2i16_vss(<2 x i16> %lhs, <2 x i16> inreg %rhs, <
 ;
 ; GFX8-LABEL: v_fshl_v2i16_vss:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
-; GFX8-NEXT:    s_and_b32 s0, 0xffff, s0
-; GFX8-NEXT:    s_lshr_b32 s3, s1, 16
-; GFX8-NEXT:    s_and_b32 s4, s1, 15
+; GFX8-NEXT:    s_and_b32 s3, s1, 15
+; GFX8-NEXT:    v_lshlrev_b16_e32 v1, s3, v0
+; GFX8-NEXT:    s_and_b32 s3, 0xffff, s0
+; GFX8-NEXT:    s_lshr_b32 s2, s1, 16
 ; GFX8-NEXT:    s_andn2_b32 s1, 15, s1
-; GFX8-NEXT:    s_lshr_b32 s0, s0, 1
-; GFX8-NEXT:    v_lshlrev_b16_e32 v1, s4, v0
-; GFX8-NEXT:    s_lshr_b32 s0, s0, s1
-; GFX8-NEXT:    v_or_b32_e32 v1, s0, v1
-; GFX8-NEXT:    s_and_b32 s0, s3, 15
-; GFX8-NEXT:    s_andn2_b32 s1, 15, s3
-; GFX8-NEXT:    v_mov_b32_e32 v2, s0
-; GFX8-NEXT:    s_lshr_b32 s0, s2, 1
+; GFX8-NEXT:    s_lshr_b32 s3, s3, 1
+; GFX8-NEXT:    s_lshr_b32 s1, s3, s1
+; GFX8-NEXT:    v_or_b32_e32 v1, s1, v1
+; GFX8-NEXT:    s_and_b32 s1, s2, 15
+; GFX8-NEXT:    s_andn2_b32 s2, 15, s2
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    s_lshr_b32 s0, s0, 17
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    s_lshr_b32 s0, s0, s1
+; GFX8-NEXT:    s_lshr_b32 s0, s0, s2
 ; GFX8-NEXT:    v_or_b32_e32 v0, s0, v0
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff, v0
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
@@ -4249,23 +4236,22 @@ define amdgpu_ps i48 @s_fshl_v3i16(<3 x i16> inreg %lhs, <3 x i16> inreg %rhs, <
 ;
 ; GFX8-LABEL: s_fshl_v3i16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_lshr_b32 s7, s2, 16
-; GFX8-NEXT:    s_and_b32 s2, 0xffff, s2
-; GFX8-NEXT:    s_lshr_b32 s8, s4, 16
-; GFX8-NEXT:    s_and_b32 s9, s4, 15
-; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
-; GFX8-NEXT:    s_lshr_b32 s2, s2, 1
+; GFX8-NEXT:    s_and_b32 s8, s4, 15
 ; GFX8-NEXT:    s_lshr_b32 s6, s0, 16
-; GFX8-NEXT:    s_lshl_b32 s0, s0, s9
-; GFX8-NEXT:    s_lshr_b32 s2, s2, s4
-; GFX8-NEXT:    s_or_b32 s0, s0, s2
-; GFX8-NEXT:    s_and_b32 s2, s8, 15
-; GFX8-NEXT:    s_andn2_b32 s4, 15, s8
-; GFX8-NEXT:    s_lshl_b32 s2, s6, s2
-; GFX8-NEXT:    s_lshr_b32 s6, s7, 1
-; GFX8-NEXT:    s_lshr_b32 s4, s6, s4
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s8
+; GFX8-NEXT:    s_and_b32 s8, 0xffff, s2
+; GFX8-NEXT:    s_lshr_b32 s7, s4, 16
+; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
+; GFX8-NEXT:    s_lshr_b32 s8, s8, 1
+; GFX8-NEXT:    s_lshr_b32 s4, s8, s4
+; GFX8-NEXT:    s_or_b32 s0, s0, s4
+; GFX8-NEXT:    s_and_b32 s4, s7, 15
+; GFX8-NEXT:    s_andn2_b32 s7, 15, s7
+; GFX8-NEXT:    s_lshr_b32 s2, s2, 17
+; GFX8-NEXT:    s_lshl_b32 s4, s6, s4
+; GFX8-NEXT:    s_lshr_b32 s2, s2, s7
 ; GFX8-NEXT:    s_and_b32 s3, 0xffff, s3
-; GFX8-NEXT:    s_or_b32 s2, s2, s4
+; GFX8-NEXT:    s_or_b32 s2, s4, s2
 ; GFX8-NEXT:    s_and_b32 s4, s5, 15
 ; GFX8-NEXT:    s_andn2_b32 s5, 15, s5
 ; GFX8-NEXT:    s_lshr_b32 s3, s3, 1
@@ -4462,13 +4448,12 @@ define <3 x half> @v_fshl_v3i16(<3 x i16> %lhs, <3 x i16> %rhs, <3 x i16> %amt)
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v7, v7, v8
 ; GFX8-NEXT:    v_or_b32_e32 v6, v6, v7
 ; GFX8-NEXT:    v_mov_b32_e32 v7, 15
-; GFX8-NEXT:    v_and_b32_sdwa v7, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_mov_b32_e32 v8, -1
+; GFX8-NEXT:    v_and_b32_sdwa v7, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v4, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_mov_b32_e32 v7, 1
 ; GFX8-NEXT:    v_and_b32_e32 v4, 15, v4
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 17, v2
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v2, v4, v2
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_and_b32_e32 v2, 15, v5
@@ -4586,39 +4571,37 @@ define amdgpu_ps <2 x i32> @s_fshl_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
 ;
 ; GFX8-LABEL: s_fshl_v4i16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_lshr_b32 s8, s2, 16
-; GFX8-NEXT:    s_and_b32 s2, 0xffff, s2
-; GFX8-NEXT:    s_lshr_b32 s10, s4, 16
-; GFX8-NEXT:    s_and_b32 s12, s4, 15
-; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
-; GFX8-NEXT:    s_lshr_b32 s2, s2, 1
+; GFX8-NEXT:    s_and_b32 s10, s4, 15
 ; GFX8-NEXT:    s_lshr_b32 s6, s0, 16
-; GFX8-NEXT:    s_lshl_b32 s0, s0, s12
-; GFX8-NEXT:    s_lshr_b32 s2, s2, s4
-; GFX8-NEXT:    s_or_b32 s0, s0, s2
-; GFX8-NEXT:    s_and_b32 s2, s10, 15
-; GFX8-NEXT:    s_andn2_b32 s4, 15, s10
-; GFX8-NEXT:    s_lshl_b32 s2, s6, s2
-; GFX8-NEXT:    s_lshr_b32 s6, s8, 1
-; GFX8-NEXT:    s_lshr_b32 s9, s3, 16
-; GFX8-NEXT:    s_lshr_b32 s4, s6, s4
-; GFX8-NEXT:    s_and_b32 s3, 0xffff, s3
-; GFX8-NEXT:    s_lshr_b32 s11, s5, 16
-; GFX8-NEXT:    s_or_b32 s2, s2, s4
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s10
+; GFX8-NEXT:    s_and_b32 s10, 0xffff, s2
+; GFX8-NEXT:    s_lshr_b32 s8, s4, 16
+; GFX8-NEXT:    s_andn2_b32 s4, 15, s4
+; GFX8-NEXT:    s_lshr_b32 s10, s10, 1
+; GFX8-NEXT:    s_lshr_b32 s4, s10, s4
+; GFX8-NEXT:    s_or_b32 s0, s0, s4
+; GFX8-NEXT:    s_and_b32 s4, s8, 15
+; GFX8-NEXT:    s_andn2_b32 s8, 15, s8
+; GFX8-NEXT:    s_lshr_b32 s2, s2, 17
+; GFX8-NEXT:    s_lshl_b32 s4, s6, s4
+; GFX8-NEXT:    s_lshr_b32 s2, s2, s8
+; GFX8-NEXT:    s_or_b32 s2, s4, s2
 ; GFX8-NEXT:    s_and_b32 s4, s5, 15
-; GFX8-NEXT:    s_andn2_b32 s5, 15, s5
-; GFX8-NEXT:    s_lshr_b32 s3, s3, 1
 ; GFX8-NEXT:    s_lshr_b32 s7, s1, 16
 ; GFX8-NEXT:    s_lshl_b32 s1, s1, s4
+; GFX8-NEXT:    s_and_b32 s4, 0xffff, s3
+; GFX8-NEXT:    s_lshr_b32 s9, s5, 16
+; GFX8-NEXT:    s_andn2_b32 s5, 15, s5
+; GFX8-NEXT:    s_lshr_b32 s4, s4, 1
+; GFX8-NEXT:    s_lshr_b32 s4, s4, s5
+; GFX8-NEXT:    s_or_b32 s1, s1, s4
+; GFX8-NEXT:    s_and_b32 s4, s9, 15
+; GFX8-NEXT:    s_andn2_b32 s5, 15, s9
+; GFX8-NEXT:    s_lshr_b32 s3, s3, 17
+; GFX8-NEXT:    s_lshl_b32 s4, s7, s4
 ; GFX8-NEXT:    s_lshr_b32 s3, s3, s5
-; GFX8-NEXT:    s_or_b32 s1, s1, s3
-; GFX8-NEXT:    s_and_b32 s3, s11, 15
-; GFX8-NEXT:    s_andn2_b32 s4, 15, s11
-; GFX8-NEXT:    s_lshr_b32 s5, s9, 1
-; GFX8-NEXT:    s_lshl_b32 s3, s7, s3
-; GFX8-NEXT:    s_lshr_b32 s4, s5, s4
 ; GFX8-NEXT:    s_and_b32 s2, 0xffff, s2
-; GFX8-NEXT:    s_or_b32 s3, s3, s4
+; GFX8-NEXT:    s_or_b32 s3, s4, s3
 ; GFX8-NEXT:    s_and_b32 s0, 0xffff, s0
 ; GFX8-NEXT:    s_lshl_b32 s2, s2, 16
 ; GFX8-NEXT:    s_or_b32 s0, s0, s2
@@ -4803,26 +4786,25 @@ define <4 x half> @v_fshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt)
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v7, v7, v8
 ; GFX8-NEXT:    v_or_b32_e32 v6, v6, v7
 ; GFX8-NEXT:    v_mov_b32_e32 v7, 15
-; GFX8-NEXT:    v_and_b32_sdwa v8, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_mov_b32_e32 v9, -1
+; GFX8-NEXT:    v_and_b32_sdwa v8, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v4, v4, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_mov_b32_e32 v8, 1
 ; GFX8-NEXT:    v_and_b32_e32 v4, 15, v4
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 17, v2
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v2, v4, v2
 ; GFX8-NEXT:    v_xor_b32_e32 v4, -1, v5
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GFX8-NEXT:    v_and_b32_e32 v2, 15, v5
 ; GFX8-NEXT:    v_and_b32_e32 v4, 15, v4
-; GFX8-NEXT:    v_lshrrev_b16_e32 v10, 1, v3
+; GFX8-NEXT:    v_lshrrev_b16_e32 v8, 1, v3
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v2, v2, v1
-; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v4, v10
+; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v4, v8
 ; GFX8-NEXT:    v_or_b32_e32 v2, v2, v4
 ; GFX8-NEXT:    v_and_b32_sdwa v4, v5, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_xor_b32_sdwa v5, v5, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ; GFX8-NEXT:    v_and_b32_e32 v5, 15, v5
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b32_e32 v3, 17, v3
 ; GFX8-NEXT:    v_lshlrev_b16_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v3, v5, v3
 ; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
index b74c6eeabb239..ea6b3a3ad7866 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
@@ -3623,14 +3623,12 @@ define <2 x i16> @v_fshr_v2i16_4_8(<2 x i16> %lhs, <2 x i16> %rhs) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
-; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 12, v0
-; GFX8-NEXT:    v_lshrrev_b16_e32 v3, 4, v1
-; GFX8-NEXT:    v_or_b32_e32 v0, v0, v3
-; GFX8-NEXT:    v_mov_b32_e32 v3, 8
 ; GFX8-NEXT:    v_lshlrev_b16_e32 v2, 8, v2
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v3, 4, v1
+; GFX8-NEXT:    v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
+; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 12, v0
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v3
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
index 6baa10bb48621..8533e34ff13f8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
@@ -807,10 +807,10 @@ define <2 x i16> @v_lshr_v2i16_15(<2 x i16> %value) {
 ; GFX8-LABEL: v_lshr_v2i16_15:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_mov_b32_e32 v2, 15
-; GFX8-NEXT:    v_lshrrev_b16_e32 v1, 15, v0
-; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    v_mov_b32_e32 v1, 31
+; GFX8-NEXT:    v_lshrrev_b32_sdwa v1, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 15, v0
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_lshr_v2i16_15:

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll
index 38ef707fa65a2..3685eed5043a3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll
@@ -71,14 +71,14 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX9-NEXT:    s_load_dword s6, s[4:5], 0x0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_and_b32 s5, 0xffff, s0
-; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s6
-; GFX9-NEXT:    s_lshr_b32 s0, s5, 8
+; GFX9-NEXT:    s_lshr_b32 s5, s5, 8
+; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX9-NEXT:    ds_write_b8 v1, v0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:1
-; GFX9-NEXT:    s_lshr_b32 s0, s4, 8
+; GFX9-NEXT:    s_lshr_b32 s0, s0, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:2
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
@@ -90,7 +90,7 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:4
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:5
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s1, s1, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:6
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
@@ -102,7 +102,7 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:8
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:9
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s1, s2, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:10
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
@@ -114,7 +114,7 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:12
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:13
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s1, s3, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:14
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
@@ -181,37 +181,37 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX10-NEXT:    s_load_dword s6, s[4:5], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_lshr_b32 s4, s0, 16
-; GFX10-NEXT:    s_and_b32 s5, 0xffff, s0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s6
-; GFX10-NEXT:    s_lshr_b32 s0, s1, 16
-; GFX10-NEXT:    s_and_b32 s6, 0xffff, s1
+; GFX10-NEXT:    s_and_b32 s5, 0xffff, s0
+; GFX10-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX10-NEXT:    s_and_b32 s7, 0xffff, s1
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s1
-; GFX10-NEXT:    s_lshr_b32 s1, s2, 16
-; GFX10-NEXT:    s_and_b32 s7, 0xffff, s2
-; GFX10-NEXT:    v_mov_b32_e32 v3, s2
-; GFX10-NEXT:    s_lshr_b32 s2, s5, 8
+; GFX10-NEXT:    s_lshr_b32 s6, s1, 16
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s4
-; GFX10-NEXT:    s_lshr_b32 s5, s4, 8
-; GFX10-NEXT:    s_lshr_b32 s4, s6, 8
-; GFX10-NEXT:    s_lshr_b32 s6, s0, 8
+; GFX10-NEXT:    s_lshr_b32 s1, s1, 24
+; GFX10-NEXT:    s_lshr_b32 s8, s2, 16
+; GFX10-NEXT:    s_and_b32 s9, 0xffff, s2
+; GFX10-NEXT:    s_lshr_b32 s5, s5, 8
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s0
-; GFX10-NEXT:    v_mov_b32_e32 v6, s2
 ; GFX10-NEXT:    s_lshr_b32 s0, s7, 8
-; GFX10-NEXT:    v_mov_b32_e32 v7, s5
-; GFX10-NEXT:    v_mov_b32_e32 v8, s4
-; GFX10-NEXT:    v_mov_b32_e32 v9, s6
+; GFX10-NEXT:    v_mov_b32_e32 v6, s6
+; GFX10-NEXT:    v_mov_b32_e32 v7, s1
+; GFX10-NEXT:    s_lshr_b32 s1, s9, 8
+; GFX10-NEXT:    v_mov_b32_e32 v8, s5
+; GFX10-NEXT:    v_mov_b32_e32 v9, s0
 ; GFX10-NEXT:    ds_write_b8 v1, v0
 ; GFX10-NEXT:    ds_write_b8 v1, v2 offset:4
 ; GFX10-NEXT:    ds_write_b8 v1, v4 offset:2
-; GFX10-NEXT:    ds_write_b8 v1, v5 offset:6
-; GFX10-NEXT:    ds_write_b8 v1, v6 offset:1
-; GFX10-NEXT:    ds_write_b8 v1, v7 offset:3
-; GFX10-NEXT:    ds_write_b8 v1, v8 offset:5
-; GFX10-NEXT:    v_mov_b32_e32 v0, s1
-; GFX10-NEXT:    v_mov_b32_e32 v10, s0
-; GFX10-NEXT:    s_lshr_b32 s0, s1, 8
-; GFX10-NEXT:    ds_write_b8 v1, v9 offset:7
+; GFX10-NEXT:    ds_write_b8 v1, v5 offset:3
+; GFX10-NEXT:    ds_write_b8 v1, v6 offset:6
+; GFX10-NEXT:    ds_write_b8 v1, v8 offset:1
+; GFX10-NEXT:    ds_write_b8 v1, v9 offset:5
+; GFX10-NEXT:    v_mov_b32_e32 v0, s8
+; GFX10-NEXT:    v_mov_b32_e32 v3, s2
+; GFX10-NEXT:    v_mov_b32_e32 v10, s1
+; GFX10-NEXT:    s_lshr_b32 s0, s2, 24
+; GFX10-NEXT:    ds_write_b8 v1, v7 offset:7
 ; GFX10-NEXT:    ds_write_b8 v1, v3 offset:8
 ; GFX10-NEXT:    ds_write_b8 v1, v10 offset:9
 ; GFX10-NEXT:    ds_write_b8 v1, v0 offset:10
@@ -221,7 +221,7 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX10-NEXT:    s_lshr_b32 s0, s0, 8
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s0
-; GFX10-NEXT:    s_lshr_b32 s0, s1, 8
+; GFX10-NEXT:    s_lshr_b32 s0, s3, 24
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s1
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s0
 ; GFX10-NEXT:    ds_write_b8 v1, v0 offset:11
@@ -240,38 +240,37 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
 ; GFX11-NEXT:    s_and_b32 s6, 0xffff, s0
 ; GFX11-NEXT:    s_lshr_b32 s5, s0, 16
 ; GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s4
-; GFX11-NEXT:    s_lshr_b32 s0, s1, 16
-; GFX11-NEXT:    s_and_b32 s4, 0xffff, s1
+; GFX11-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX11-NEXT:    s_lshr_b32 s4, s1, 16
+; GFX11-NEXT:    s_and_b32 s7, 0xffff, s1
+; GFX11-NEXT:    s_lshr_b32 s6, s6, 8
 ; GFX11-NEXT:    v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v3, s2
-; GFX11-NEXT:    s_lshr_b32 s1, s2, 16
-; GFX11-NEXT:    s_and_b32 s7, 0xffff, s2
-; GFX11-NEXT:    s_lshr_b32 s2, s6, 8
-; GFX11-NEXT:    s_lshr_b32 s6, s5, 8
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    v_dual_mov_b32 v6, s2 :: v_dual_mov_b32 v7, s6
+; GFX11-NEXT:    s_lshr_b32 s1, s1, 24
 ; GFX11-NEXT:    v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s0
-; GFX11-NEXT:    s_lshr_b32 s4, s4, 8
-; GFX11-NEXT:    s_lshr_b32 s5, s0, 8
 ; GFX11-NEXT:    s_lshr_b32 s0, s7, 8
-; GFX11-NEXT:    v_dual_mov_b32 v8, s4 :: v_dual_mov_b32 v9, s5
+; GFX11-NEXT:    v_dual_mov_b32 v6, s4 :: v_dual_mov_b32 v7, s6
+; GFX11-NEXT:    s_and_b32 s9, 0xffff, s2
+; GFX11-NEXT:    v_dual_mov_b32 v8, s1 :: v_dual_mov_b32 v9, s0
+; GFX11-NEXT:    s_lshr_b32 s0, s2, 24
+; GFX11-NEXT:    s_lshr_b32 s1, s9, 8
 ; GFX11-NEXT:    ds_store_b8 v1, v0
-; GFX11-NEXT:    ds_store_b8 v1, v6 offset:1
+; GFX11-NEXT:    ds_store_b8 v1, v7 offset:1
 ; GFX11-NEXT:    ds_store_b8 v1, v4 offset:2
-; GFX11-NEXT:    ds_store_b8 v1, v7 offset:3
+; GFX11-NEXT:    ds_store_b8 v1, v5 offset:3
 ; GFX11-NEXT:    ds_store_b8 v1, v2 offset:4
-; GFX11-NEXT:    ds_store_b8 v1, v8 offset:5
-; GFX11-NEXT:    ds_store_b8 v1, v5 offset:6
-; GFX11-NEXT:    ds_store_b8 v1, v9 offset:7
-; GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s3
-; GFX11-NEXT:    s_lshr_b32 s0, s1, 8
-; GFX11-NEXT:    v_mov_b32_e32 v2, s1
+; GFX11-NEXT:    ds_store_b8 v1, v9 offset:5
+; GFX11-NEXT:    ds_store_b8 v1, v6 offset:6
+; GFX11-NEXT:    ds_store_b8 v1, v8 offset:7
 ; GFX11-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX11-NEXT:    s_and_b32 s0, 0xffff, s3
-; GFX11-NEXT:    s_lshr_b32 s1, s3, 16
+; GFX11-NEXT:    s_lshr_b32 s8, s2, 16
+; GFX11-NEXT:    v_dual_mov_b32 v0, s1 :: v_dual_mov_b32 v5, s3
 ; GFX11-NEXT:    s_lshr_b32 s0, s0, 8
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    v_dual_mov_b32 v7, s1 :: v_dual_mov_b32 v6, s0
-; GFX11-NEXT:    s_lshr_b32 s0, s1, 8
+; GFX11-NEXT:    s_lshr_b32 s1, s3, 16
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v7, s1
+; GFX11-NEXT:    v_mov_b32_e32 v6, s0
+; GFX11-NEXT:    s_lshr_b32 s0, s3, 24
 ; GFX11-NEXT:    v_mov_b32_e32 v8, s0
 ; GFX11-NEXT:    ds_store_b8 v1, v3 offset:8
 ; GFX11-NEXT:    ds_store_b8 v1, v0 offset:9

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll
index 1d2d330eeb61a..cce6bd9301cbf 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll
@@ -72,15 +72,15 @@ define amdgpu_kernel void @store_lds_v3i32_align1(ptr addrspace(3) %out, <3 x i3
 ; GFX9-NEXT:    s_load_dword s3, s[4:5], 0x0
 ; GFX9-NEXT:    ; kill: killed $sgpr4_sgpr5
 ; GFX9-NEXT:    s_and_b32 s5, 0xffff, s0
-; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-NEXT:    s_lshr_b32 s0, s5, 8
+; GFX9-NEXT:    s_lshr_b32 s3, s5, 8
+; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX9-NEXT:    ds_write_b8 v1, v0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:1
-; GFX9-NEXT:    s_lshr_b32 s0, s4, 8
+; GFX9-NEXT:    s_lshr_b32 s0, s0, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:2
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
@@ -92,7 +92,7 @@ define amdgpu_kernel void @store_lds_v3i32_align1(ptr addrspace(3) %out, <3 x i3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:4
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:5
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s1, s1, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:6
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
@@ -104,7 +104,7 @@ define amdgpu_kernel void @store_lds_v3i32_align1(ptr addrspace(3) %out, <3 x i3
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:8
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:9
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s1, s2, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    ds_write_b8 v1, v0 offset:10
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
@@ -163,37 +163,37 @@ define amdgpu_kernel void @store_lds_v3i32_align1(ptr addrspace(3) %out, <3 x i3
 ; GFX10-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX10-NEXT:    s_and_b32 s5, 0xffff, s0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s0
-; GFX10-NEXT:    s_lshr_b32 s0, s1, 16
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s3
-; GFX10-NEXT:    s_and_b32 s3, 0xffff, s1
+; GFX10-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX10-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX10-NEXT:    s_and_b32 s6, 0xffff, s1
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s1
-; GFX10-NEXT:    s_lshr_b32 s1, s2, 16
-; GFX10-NEXT:    s_and_b32 s6, 0xffff, s2
-; GFX10-NEXT:    v_mov_b32_e32 v3, s2
-; GFX10-NEXT:    s_lshr_b32 s2, s5, 8
-; GFX10-NEXT:    s_lshr_b32 s5, s4, 8
+; GFX10-NEXT:    s_lshr_b32 s1, s1, 24
+; GFX10-NEXT:    s_and_b32 s8, 0xffff, s2
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s4
-; GFX10-NEXT:    s_lshr_b32 s4, s0, 8
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s0
 ; GFX10-NEXT:    s_lshr_b32 s0, s6, 8
-; GFX10-NEXT:    v_mov_b32_e32 v9, s4
-; GFX10-NEXT:    s_lshr_b32 s3, s3, 8
-; GFX10-NEXT:    v_mov_b32_e32 v6, s2
-; GFX10-NEXT:    v_mov_b32_e32 v10, s0
-; GFX10-NEXT:    s_lshr_b32 s0, s1, 8
-; GFX10-NEXT:    v_mov_b32_e32 v7, s5
-; GFX10-NEXT:    v_mov_b32_e32 v8, s3
+; GFX10-NEXT:    v_mov_b32_e32 v7, s1
+; GFX10-NEXT:    s_lshr_b32 s1, s8, 8
+; GFX10-NEXT:    s_lshr_b32 s7, s2, 16
+; GFX10-NEXT:    v_mov_b32_e32 v3, s2
+; GFX10-NEXT:    s_lshr_b32 s5, s5, 8
+; GFX10-NEXT:    v_mov_b32_e32 v6, s3
+; GFX10-NEXT:    v_mov_b32_e32 v9, s0
+; GFX10-NEXT:    v_mov_b32_e32 v10, s1
+; GFX10-NEXT:    s_lshr_b32 s0, s2, 24
+; GFX10-NEXT:    v_mov_b32_e32 v8, s5
 ; GFX10-NEXT:    ds_write_b8 v1, v0
 ; GFX10-NEXT:    ds_write_b8 v1, v2 offset:4
 ; GFX10-NEXT:    ds_write_b8 v1, v4 offset:2
-; GFX10-NEXT:    ds_write_b8 v1, v5 offset:6
-; GFX10-NEXT:    ds_write_b8 v1, v6 offset:1
-; GFX10-NEXT:    ds_write_b8 v1, v7 offset:3
-; GFX10-NEXT:    ds_write_b8 v1, v8 offset:5
-; GFX10-NEXT:    v_mov_b32_e32 v0, s1
+; GFX10-NEXT:    ds_write_b8 v1, v5 offset:3
+; GFX10-NEXT:    ds_write_b8 v1, v6 offset:6
+; GFX10-NEXT:    ds_write_b8 v1, v8 offset:1
+; GFX10-NEXT:    ds_write_b8 v1, v9 offset:5
+; GFX10-NEXT:    v_mov_b32_e32 v0, s7
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s0
-; GFX10-NEXT:    ds_write_b8 v1, v9 offset:7
+; GFX10-NEXT:    ds_write_b8 v1, v7 offset:7
 ; GFX10-NEXT:    ds_write_b8 v1, v3 offset:8
 ; GFX10-NEXT:    ds_write_b8 v1, v10 offset:9
 ; GFX10-NEXT:    ds_write_b8 v1, v0 offset:10
@@ -206,37 +206,37 @@ define amdgpu_kernel void @store_lds_v3i32_align1(ptr addrspace(3) %out, <3 x i3
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_load_b32 s3, s[4:5], 0x0
 ; GFX11-NEXT:    s_and_b32 s5, 0xffff, s0
+; GFX11-NEXT:    s_lshr_b32 s7, s2, 16
+; GFX11-NEXT:    s_lshr_b32 s5, s5, 8
 ; GFX11-NEXT:    s_lshr_b32 s4, s0, 16
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s3
-; GFX11-NEXT:    s_lshr_b32 s0, s1, 16
-; GFX11-NEXT:    s_and_b32 s3, 0xffff, s1
+; GFX11-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX11-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX11-NEXT:    s_and_b32 s6, 0xffff, s1
 ; GFX11-NEXT:    v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v3, s2
-; GFX11-NEXT:    s_lshr_b32 s1, s2, 16
-; GFX11-NEXT:    s_and_b32 s6, 0xffff, s2
-; GFX11-NEXT:    s_lshr_b32 s2, s5, 8
-; GFX11-NEXT:    s_lshr_b32 s5, s4, 8
-; GFX11-NEXT:    v_dual_mov_b32 v6, s1 :: v_dual_mov_b32 v7, s2
+; GFX11-NEXT:    s_lshr_b32 s1, s1, 24
+; GFX11-NEXT:    s_and_b32 s8, 0xffff, s2
+; GFX11-NEXT:    v_dual_mov_b32 v8, s7 :: v_dual_mov_b32 v9, s5
 ; GFX11-NEXT:    v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s0
-; GFX11-NEXT:    s_lshr_b32 s3, s3, 8
-; GFX11-NEXT:    s_lshr_b32 s4, s0, 8
+; GFX11-NEXT:    s_lshr_b32 s2, s2, 24
 ; GFX11-NEXT:    s_lshr_b32 s0, s6, 8
-; GFX11-NEXT:    s_lshr_b32 s6, s1, 8
-; GFX11-NEXT:    v_dual_mov_b32 v8, s5 :: v_dual_mov_b32 v9, s3
-; GFX11-NEXT:    v_dual_mov_b32 v10, s4 :: v_dual_mov_b32 v11, s0
-; GFX11-NEXT:    v_mov_b32_e32 v12, s6
+; GFX11-NEXT:    v_dual_mov_b32 v6, s3 :: v_dual_mov_b32 v7, s1
+; GFX11-NEXT:    s_lshr_b32 s1, s8, 8
+; GFX11-NEXT:    v_dual_mov_b32 v10, s2 :: v_dual_mov_b32 v11, s0
+; GFX11-NEXT:    v_mov_b32_e32 v12, s1
 ; GFX11-NEXT:    ds_store_b8 v1, v0
-; GFX11-NEXT:    ds_store_b8 v1, v7 offset:1
+; GFX11-NEXT:    ds_store_b8 v1, v9 offset:1
 ; GFX11-NEXT:    ds_store_b8 v1, v4 offset:2
-; GFX11-NEXT:    ds_store_b8 v1, v8 offset:3
+; GFX11-NEXT:    ds_store_b8 v1, v5 offset:3
 ; GFX11-NEXT:    ds_store_b8 v1, v2 offset:4
-; GFX11-NEXT:    ds_store_b8 v1, v9 offset:5
-; GFX11-NEXT:    ds_store_b8 v1, v5 offset:6
-; GFX11-NEXT:    ds_store_b8 v1, v10 offset:7
+; GFX11-NEXT:    ds_store_b8 v1, v11 offset:5
+; GFX11-NEXT:    ds_store_b8 v1, v6 offset:6
+; GFX11-NEXT:    ds_store_b8 v1, v7 offset:7
 ; GFX11-NEXT:    ds_store_b8 v1, v3 offset:8
-; GFX11-NEXT:    ds_store_b8 v1, v11 offset:9
-; GFX11-NEXT:    ds_store_b8 v1, v6 offset:10
-; GFX11-NEXT:    ds_store_b8 v1, v12 offset:11
+; GFX11-NEXT:    ds_store_b8 v1, v12 offset:9
+; GFX11-NEXT:    ds_store_b8 v1, v8 offset:10
+; GFX11-NEXT:    ds_store_b8 v1, v10 offset:11
 ; GFX11-NEXT:    s_endpgm
   store <3 x i32> %x, ptr addrspace(3) %out, align 1
   ret void

diff  --git a/llvm/test/CodeGen/AMDGPU/ds-alignment.ll b/llvm/test/CodeGen/AMDGPU/ds-alignment.ll
index 93422e259b827..4b52d6efb8e98 100644
--- a/llvm/test/CodeGen/AMDGPU/ds-alignment.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds-alignment.ll
@@ -105,14 +105,13 @@ define amdgpu_kernel void @ds4align1(ptr addrspace(3) %in, ptr addrspace(3) %out
 ; ALIGNED-GISEL-LABEL: ds4align1:
 ; ALIGNED-GISEL:       ; %bb.0:
 ; ALIGNED-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, 8
 ; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v0, s0
 ; ALIGNED-GISEL-NEXT:    ds_read_u8 v1, v0
 ; ALIGNED-GISEL-NEXT:    ds_read_u8 v2, v0 offset:1
 ; ALIGNED-GISEL-NEXT:    ds_read_u8 v3, v0 offset:3
 ; ALIGNED-GISEL-NEXT:    ds_read_u8 v0, v0 offset:2
-; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v5, s1
+; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, s1
 ; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(2)
 ; ALIGNED-GISEL-NEXT:    v_lshl_or_b32 v1, v2, 8, v1
 ; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(1)
@@ -121,11 +120,11 @@ define amdgpu_kernel void @ds4align1(ptr addrspace(3) %in, ptr addrspace(3) %out
 ; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; ALIGNED-GISEL-NEXT:    v_or3_b32 v0, v2, v0, v1
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v0
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:1
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v0 offset:2
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:3
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v0
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v2, 24, v0
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1 offset:1
+; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v4, v0 offset:2
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v2 offset:3
 ; ALIGNED-GISEL-NEXT:    s_endpgm
 ;
 ; UNALIGNED-LABEL: ds4align1:
@@ -262,14 +261,13 @@ define amdgpu_kernel void @ds8align1(ptr addrspace(3) %in, ptr addrspace(3) %out
 ; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v3, s1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v2 offset:1
-; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v2, 8
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v4, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v2, 24, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v3, v1 offset:2
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v4 offset:3
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v2 offset:3
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v0 offset:4
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v1 offset:5
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v3, v0 offset:6
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v3, v1 offset:7
 ; ALIGNED-GISEL-NEXT:    s_endpgm
@@ -448,26 +446,25 @@ define amdgpu_kernel void @ds12align1(ptr addrspace(3) %in, ptr addrspace(3) %ou
 ; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v4, 16, v5
 ; ALIGNED-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; ALIGNED-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 24, v0
+; ALIGNED-GISEL-NEXT:    v_or3_b32 v2, v6, v7, v2
 ; ALIGNED-GISEL-NEXT:    v_or3_b32 v0, v0, v4, v3
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v3, 8, v1
 ; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, s1
-; ALIGNED-GISEL-NEXT:    v_or3_b32 v2, v6, v7, v2
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v3 offset:1
-; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v3, 8
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v5, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v3, 24, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v4, v1 offset:2
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v5 offset:3
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v3 offset:3
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v2
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v2 offset:4
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1 offset:5
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v2
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v4, v2 offset:6
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1 offset:7
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v0 offset:8
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1 offset:9
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v4, v0 offset:10
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v4, v1 offset:11
 ; ALIGNED-GISEL-NEXT:    s_endpgm
@@ -765,26 +762,25 @@ define amdgpu_kernel void @ds16align1(ptr addrspace(3) %in, ptr addrspace(3) %ou
 ; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v5, s1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v4 offset:1
-; ALIGNED-GISEL-NEXT:    v_mov_b32_e32 v4, 8
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v6, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v4, 24, v1
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v1 offset:2
-; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v6 offset:3
+; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v4 offset:3
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v2
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v2 offset:4
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:5
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v2
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v2 offset:6
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:7
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v3
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v3 offset:8
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:9
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v3
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v3 offset:10
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:11
 ; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v0 offset:12
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:13
-; ALIGNED-GISEL-NEXT:    v_lshrrev_b16_sdwa v1, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; ALIGNED-GISEL-NEXT:    v_lshrrev_b32_e32 v1, 24, v0
 ; ALIGNED-GISEL-NEXT:    ds_write_b8_d16_hi v5, v0 offset:14
 ; ALIGNED-GISEL-NEXT:    ds_write_b8 v5, v1 offset:15
 ; ALIGNED-GISEL-NEXT:    s_endpgm


        

