[llvm] [AMDGPU][GlobalISel] Add combines with or/and that only use half of 64-bit values (PR #151519)
Mirko Brkušanin via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 31 06:49:52 PDT 2025
https://github.com/mbrkusanin updated https://github.com/llvm/llvm-project/pull/151519
From dfe380508d8894cad2b3c5c2e308025ab86f9e71 Mon Sep 17 00:00:00 2001
From: Mirko Brkusanin <Mirko.Brkusanin at amd.com>
Date: Thu, 31 Jul 2025 15:40:06 +0200
Subject: [PATCH 1/2] pre-commit test
---
.../GlobalISel/combine-and-or-s64-s32.mir | 189 ++++++++++++++++++
1 file changed, 189 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/combine-and-or-s64-s32.mir
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-and-or-s64-s32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-and-or-s64-s32.mir
new file mode 100644
index 0000000000000..6efb66e26fce6
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-and-or-s64-s32.mir
@@ -0,0 +1,189 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=tahiti -run-pass=amdgpu-prelegalizer-combiner %s -o - | FileCheck %s
+
+---
+name: test_combine_or_s64_s32
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_combine_or_s64_s32
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = disjoint G_OR [[MV]], [[ZEXT]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR]](s64)
+ ; CHECK-NEXT: $sgpr0 = COPY [[UV]](s32)
+ ; CHECK-NEXT: $sgpr1 = COPY [[UV1]](s32)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32) = COPY $sgpr2
+ %3:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
+ %4:_(s64) = G_ZEXT %2(s32)
+ %5:_(s64) = disjoint G_OR %3, %4
+ %6:_(s32), %7:_(s32) = G_UNMERGE_VALUES %5(s64)
+ $sgpr0 = COPY %6(s32)
+ $sgpr1 = COPY %7(s32)
+ SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+...
+---
+name: test_combine_and_s64_himask
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_combine_and_s64_himask
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
+ ; CHECK-NEXT: $sgpr0 = COPY [[UV]](s32)
+ ; CHECK-NEXT: $sgpr1 = COPY [[UV1]](s32)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
+ %3:_(s64) = G_CONSTANT i64 -4294967296
+ %4:_(s64) = G_AND %2, %3
+ %5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %4(s64)
+ $sgpr0 = COPY %5(s32)
+ $sgpr1 = COPY %6(s32)
+ SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+...
+---
+name: test_combined
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_combined
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C]]
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = disjoint G_OR [[AND]], [[ZEXT]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR]](s64)
+ ; CHECK-NEXT: $sgpr0 = COPY [[UV]](s32)
+ ; CHECK-NEXT: $sgpr1 = COPY [[UV1]](s32)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32) = COPY $sgpr2
+ %3:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
+ %4:_(s64) = G_CONSTANT i64 -4294967296
+ %5:_(s64) = G_AND %3, %4
+ %6:_(s64) = G_ZEXT %2(s32)
+ %7:_(s64) = disjoint G_OR %5, %6
+ %8:_(s32), %9:_(s32) = G_UNMERGE_VALUES %7(s64)
+ $sgpr0 = COPY %8(s32)
+ $sgpr1 = COPY %9(s32)
+ SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+...
+
+---
+name: test_combine_or_s64_s32_no_merge_unmerge
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_combine_or_s64_s32_no_merge_unmerge
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY1]](s32)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = disjoint G_OR [[COPY]], [[ZEXT]]
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[OR]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s32) = COPY $sgpr2
+ %2:_(s64) = G_ZEXT %1(s32)
+ %3:_(s64) = disjoint G_OR %0, %2
+ $sgpr0_sgpr1 = COPY %3(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+---
+name: test_combine_and_s64_himask_no_merge_unmerge
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_combine_and_s64_himask_no_merge_unmerge
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[AND]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = G_CONSTANT i64 -4294967296
+ %2:_(s64) = G_AND %0, %1
+ $sgpr0_sgpr1 = COPY %2(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+---
+name: test_combined_vgpr_no_merge_unmerge
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+
+ ; CHECK-LABEL: name: test_combined_vgpr_no_merge_unmerge
+ ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY1]](s32)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[ZEXT]]
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[OR]](s64)
+ %1:_(s64) = COPY $vgpr0_vgpr1
+ %0:_(s32) = COPY $vgpr2
+ %2:_(s64) = G_CONSTANT i64 -4294967296
+ %3:_(s64) = G_AND %1, %2
+ %4:_(s64) = G_ZEXT %0
+ %5:_(s64) = G_OR %3, %4
+ $vgpr0_vgpr1 = COPY %5
+...
+---
+name: negative_test_incorrect_types
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
+
+ ; CHECK-LABEL: name: negative_test_incorrect_types
+ ; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -4294967296
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s128) = G_AND [[COPY]], [[C]]
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s64)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s128) = G_OR [[AND]], [[ZEXT]]
+ ; CHECK-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[OR]](s128)
+ %1:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %0:_(s64) = COPY $vgpr4_vgpr5
+ %2:_(s128) = G_CONSTANT i128 -4294967296
+ %3:_(s128) = G_AND %1, %2
+ %4:_(s128) = G_ZEXT %0
+ %5:_(s128) = G_OR %3, %4
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %5
+...
+
From 2b1926647eaa313e22517961636b00a60a2a80c1 Mon Sep 17 00:00:00 2001
From: Mirko Brkusanin <Mirko.Brkusanin at amd.com>
Date: Thu, 31 Jul 2025 15:48:36 +0200
Subject: [PATCH 2/2] [AMDGPU][GlobalISel] Add combines with or/and that only
 use half of 64-bit values
---
llvm/lib/Target/AMDGPU/AMDGPUCombine.td | 21 +-
.../Target/AMDGPU/AMDGPUCombinerHelper.cpp | 26 +
llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h | 4 +
.../GlobalISel/combine-and-or-s64-s32.mir | 58 +-
...-divergent-i1-phis-no-lane-mask-merging.ll | 29 +-
llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll | 92 +-
llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll | 926 +++++++++---------
.../CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll | 10 +-
.../CodeGen/AMDGPU/GlobalISel/sext_inreg.ll | 30 +-
.../CodeGen/AMDGPU/GlobalISel/srem.i64.ll | 48 +-
.../CodeGen/AMDGPU/GlobalISel/udiv.i64.ll | 10 +-
.../CodeGen/AMDGPU/GlobalISel/urem.i64.ll | 10 +-
llvm/test/CodeGen/AMDGPU/div_i128.ll | 48 +-
llvm/test/CodeGen/AMDGPU/div_v2i128.ll | 64 +-
llvm/test/CodeGen/AMDGPU/itofp.i128.ll | 16 +-
15 files changed, 688 insertions(+), 704 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
index 9587fad1ecd63..97c3c8e9c10c8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
@@ -151,6 +151,21 @@ def zext_of_shift_amount_combines : GICombineGroup<[
canonicalize_zext_lshr, canonicalize_zext_ashr, canonicalize_zext_shl
]>;
+// (or i64:x, (zext i32:y)) -> i64:(merge (or i32:y, lo_32(x)), hi_32(x))
+def combine_or_s64_s32 : GICombineRule<
+ (defs root:$dst),
+ (match (G_ZEXT $zext_val, i32:$src_s32):$zext_inst,
+ (G_OR $dst, i64:$src_s64, $zext_val):$dst),
+ (apply [{ Helper.applyCombineOrS64S32(*${dst}, ${src_s64}.getReg(), ${src_s32}.getReg()); }])>;
+
+// (and i64:x, 0xFFFFFFFF00000000) -> i64:(merge i32:0, hi_32(x))
+def combine_and_s64_himask : GICombineRule<
+ (defs root:$dst),
+ (match (G_CONSTANT $const, 0xFFFFFFFF00000000),
+ (G_AND $dst, i64:$src_s64, $const):$dst),
+ (apply [{ Helper.applyCombineAndS64HiMask(*${dst}, ${src_s64}.getReg()); }])>;
+
+
let Predicates = [Has16BitInsts, NotHasMed3_16] in {
// For gfx8, expand f16-fmed3-as-f32 into a min/max f16 sequence. This
// saves one instruction compared to the promotion.
@@ -180,7 +195,8 @@ def gfx8_combines : GICombineGroup<[expand_promoted_fmed3]>;
def AMDGPUPreLegalizerCombiner: GICombiner<
"AMDGPUPreLegalizerCombinerImpl",
[all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16,
- foldable_fneg, combine_shuffle_vector_to_build_vector]> {
+ foldable_fneg, combine_shuffle_vector_to_build_vector,
+ combine_or_s64_s32, combine_and_s64_himask]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
@@ -188,7 +204,8 @@ def AMDGPUPostLegalizerCombiner: GICombiner<
"AMDGPUPostLegalizerCombinerImpl",
[all_combines, gfx6gfx7_combines, gfx8_combines, combine_fmul_with_select_to_fldexp,
uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
- rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64]> {
+ rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64,
+ combine_or_s64_s32, combine_and_s64_himask]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
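
For reference, a minimal sketch of what combine_or_s64_s32 produces at the
generic-MIR level (virtual register numbers here are illustrative, not taken
from the patch; see the updated .mir test below for the exact checked output):

  ; before
  %2:_(s64) = G_ZEXT %1(s32)
  %3:_(s64) = G_OR %0, %2
  ; after: the OR only touches the low 32 bits; the high half passes through
  %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %0(s64)
  %6:_(s32) = G_OR %1, %4
  %3:_(s64) = G_MERGE_VALUES %6(s32), %5(s32)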
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
index bcc19932fd938..57cce72a10708 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
@@ -516,3 +516,29 @@ bool AMDGPUCombinerHelper::matchCombineFmulWithSelectToFldexp(
return true;
}
+
+void AMDGPUCombinerHelper::applyCombineOrS64S32(MachineInstr &MI,
+ Register SrcS64,
+ Register SrcS32) const {
+ Register DstReg = MI.getOperand(0).getReg();
+
+ auto UnmergeParts = Builder.buildUnmerge(LLT::scalar(32), SrcS64);
+ Register SrcS64Lo = UnmergeParts.getReg(0);
+ Register SrcS64Hi = UnmergeParts.getReg(1);
+
+ auto Or = Builder.buildOr(LLT::scalar(32), SrcS32, SrcS64Lo).getReg(0);
+ Builder.buildMergeValues(DstReg, {Or, SrcS64Hi});
+ MI.eraseFromParent();
+}
+
+void AMDGPUCombinerHelper::applyCombineAndS64HiMask(MachineInstr &MI,
+ Register SrcS64) const {
+ Register DstReg = MI.getOperand(0).getReg();
+
+ auto UnmergeParts = Builder.buildUnmerge(LLT::scalar(32), SrcS64);
+ Register SrcS64Hi = UnmergeParts.getReg(1);
+
+ auto Const = Builder.buildConstant(LLT::scalar(32), 0).getReg(0);
+ Builder.buildMergeValues(DstReg, {Const, SrcS64Hi});
+ MI.eraseFromParent();
+}
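
Likewise, a sketch of what applyCombineAndS64HiMask builds (again with
illustrative register numbers): the masked G_AND becomes a merge that zeroes
the low half outright, so no 64-bit AND is needed.

  ; before
  %1:_(s64) = G_CONSTANT i64 -4294967296 ; 0xFFFFFFFF00000000
  %2:_(s64) = G_AND %0, %1
  ; after
  %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %0(s64)
  %5:_(s32) = G_CONSTANT i32 0
  %2:_(s64) = G_MERGE_VALUES %5(s32), %4(s32)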
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h
index 96e0b12a168a6..e772990f23cd6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.h
@@ -43,6 +43,10 @@ class AMDGPUCombinerHelper : public CombinerHelper {
bool matchCombineFmulWithSelectToFldexp(
MachineInstr &MI, MachineInstr &Sel,
std::function<void(MachineIRBuilder &)> &MatchInfo) const;
+
+ void applyCombineOrS64S32(MachineInstr &MI, Register S64, Register S32) const;
+
+ void applyCombineAndS64HiMask(MachineInstr &MI, Register S64) const;
};
} // namespace llvm
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-and-or-s64-s32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-and-or-s64-s32.mir
index 6efb66e26fce6..89e64755a3670 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-and-or-s64-s32.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-and-or-s64-s32.mir
@@ -13,12 +13,9 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
- ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
- ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = disjoint G_OR [[MV]], [[ZEXT]]
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR]](s64)
- ; CHECK-NEXT: $sgpr0 = COPY [[UV]](s32)
- ; CHECK-NEXT: $sgpr1 = COPY [[UV1]](s32)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY2]], [[COPY]]
+ ; CHECK-NEXT: $sgpr0 = COPY [[OR]](s32)
+ ; CHECK-NEXT: $sgpr1 = COPY [[COPY1]](s32)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $sgpr1
@@ -40,14 +37,10 @@ body: |
; CHECK-LABEL: name: test_combine_and_s64_himask
; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
- ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C]]
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
- ; CHECK-NEXT: $sgpr0 = COPY [[UV]](s32)
- ; CHECK-NEXT: $sgpr1 = COPY [[UV1]](s32)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: $sgpr0 = COPY [[C]](s32)
+ ; CHECK-NEXT: $sgpr1 = COPY [[COPY]](s32)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $sgpr1
@@ -68,17 +61,10 @@ body: |
; CHECK-LABEL: name: test_combined
; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
- ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C]]
- ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = disjoint G_OR [[AND]], [[ZEXT]]
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR]](s64)
- ; CHECK-NEXT: $sgpr0 = COPY [[UV]](s32)
- ; CHECK-NEXT: $sgpr1 = COPY [[UV1]](s32)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+ ; CHECK-NEXT: $sgpr0 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: $sgpr1 = COPY [[COPY]](s32)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $sgpr1
@@ -105,9 +91,10 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
- ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY1]](s32)
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = disjoint G_OR [[COPY]], [[ZEXT]]
- ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[OR]](s64)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY1]], [[UV]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[UV1]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s32) = COPY $sgpr2
@@ -126,9 +113,10 @@ body: |
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
- ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[AND]](s64)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[UV1]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = G_CONSTANT i64 -4294967296
@@ -148,11 +136,9 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
- ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY1]](s32)
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[ZEXT]]
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[OR]](s64)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[UV1]](s32)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
%1:_(s64) = COPY $vgpr0_vgpr1
%0:_(s32) = COPY $vgpr2
%2:_(s64) = G_CONSTANT i64 -4294967296
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
index ff26ea21390e2..667fa988d8f9c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
@@ -227,23 +227,20 @@ exit:
define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3 x i32> inreg %.WorkgroupId, <3 x i32> %.LocalInvocationId) #0 {
; GFX10-LABEL: single_lane_execution_attribute:
; GFX10: ; %bb.0: ; %.entry
-; GFX10-NEXT: s_mov_b32 s6, 0
; GFX10-NEXT: s_getpc_b64 s[4:5]
-; GFX10-NEXT: s_mov_b32 s7, -1
-; GFX10-NEXT: s_mov_b32 s2, s1
-; GFX10-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
-; GFX10-NEXT: s_mov_b32 s1, 0
+; GFX10-NEXT: s_mov_b32 s2, s0
+; GFX10-NEXT: s_mov_b32 s3, s5
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, -1, 0
-; GFX10-NEXT: s_or_b64 s[12:13], s[4:5], s[0:1]
-; GFX10-NEXT: s_load_dwordx8 s[4:11], s[12:13], 0x0
+; GFX10-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x0
; GFX10-NEXT: v_mbcnt_hi_u32_b32 v1, -1, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 2, v1
; GFX10-NEXT: v_and_b32_e32 v3, 1, v1
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3
-; GFX10-NEXT: s_xor_b32 s3, vcc_lo, exec_lo
+; GFX10-NEXT: s_xor_b32 s2, vcc_lo, exec_lo
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_load_dword v2, v2, s[4:7], 0 offen
-; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s3
+; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2
+; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2
; GFX10-NEXT: s_cbranch_vccnz .LBB4_4
@@ -251,9 +248,9 @@ define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3
; GFX10-NEXT: s_mov_b32 s3, 0
; GFX10-NEXT: .LBB4_2: ; %.preheader
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_mov_b32_e32 v3, s1
+; GFX10-NEXT: v_mov_b32_e32 v3, s2
; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
-; GFX10-NEXT: s_add_i32 s1, s1, 4
+; GFX10-NEXT: s_add_i32 s2, s2, 4
; GFX10-NEXT: buffer_load_dword v3, v3, s[4:7], 0 offen
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -262,19 +259,19 @@ define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3
; GFX10-NEXT: s_cbranch_vccnz .LBB4_2
; GFX10-NEXT: ; %bb.3: ; %.preheader._crit_edge
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s3, v2
-; GFX10-NEXT: s_or_b32 s1, s0, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s1
+; GFX10-NEXT: s_or_b32 s2, s0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX10-NEXT: s_branch .LBB4_6
; GFX10-NEXT: .LBB4_4:
-; GFX10-NEXT: s_mov_b32 s1, exec_lo
+; GFX10-NEXT: s_mov_b32 s2, exec_lo
; GFX10-NEXT: ; implicit-def: $vgpr1
-; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s1
+; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2
; GFX10-NEXT: s_cbranch_vccz .LBB4_6
; GFX10-NEXT: ; %bb.5: ; %.19
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0
; GFX10-NEXT: v_or_b32_e32 v1, 2, v1
; GFX10-NEXT: .LBB4_6: ; %.22
-; GFX10-NEXT: v_add_lshl_u32 v0, v0, s2, 2
+; GFX10-NEXT: v_add_lshl_u32 v0, v0, s1, 2
; GFX10-NEXT: buffer_store_dword v1, v0, s[8:11], 0 offen
; GFX10-NEXT: s_endpgm
.entry:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
index fc81e16d68e98..dd61428811736 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
@@ -4959,17 +4959,15 @@ define amdgpu_ps i64 @s_fshl_i64_5(i64 inreg %lhs, i64 inreg %rhs) {
; GCN: ; %bb.0:
; GCN-NEXT: s_lshl_b64 s[0:1], s[0:1], 5
; GCN-NEXT: s_lshr_b32 s2, s3, 27
-; GCN-NEXT: s_mov_b32 s3, 0
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_or_b32 s0, s2, s0
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshl_i64_5:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 5
; GFX11-NEXT: s_lshr_b32 s2, s3, 27
-; GFX11-NEXT: s_mov_b32 s3, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-NEXT: s_or_b32 s0, s2, s0
; GFX11-NEXT: ; return to shader part epilog
%result = call i64 @llvm.fshl.i64(i64 %lhs, i64 %rhs, i64 5)
ret i64 %result
@@ -4979,20 +4977,13 @@ define amdgpu_ps i64 @s_fshl_i64_32(i64 inreg %lhs, i64 inreg %rhs) {
; GCN-LABEL: s_fshl_i64_32:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b32 s1, s0
-; GCN-NEXT: s_mov_b32 s0, 0
-; GCN-NEXT: s_mov_b32 s2, s3
-; GCN-NEXT: s_mov_b32 s3, s0
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_mov_b32 s0, s3
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshl_i64_32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_mov_b32 s1, s0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_mov_b32 s2, s3
-; GFX11-NEXT: s_mov_b32 s3, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-NEXT: s_mov_b32 s0, s3
; GFX11-NEXT: ; return to shader part epilog
%result = call i64 @llvm.fshl.i64(i64 %lhs, i64 %rhs, i64 32)
ret i64 %result
@@ -5097,7 +5088,7 @@ define i64 @v_fshl_i64_5(i64 %lhs, i64 %rhs) {
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 5
; GFX6-NEXT: v_lshrrev_b32_e32 v2, 27, v3
-; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX6-NEXT: v_or_b32_e32 v0, v2, v0
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshl_i64_5:
@@ -5105,7 +5096,7 @@ define i64 @v_fshl_i64_5(i64 %lhs, i64 %rhs) {
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 5, v[0:1]
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 27, v3
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshl_i64_5:
@@ -5113,7 +5104,7 @@ define i64 @v_fshl_i64_5(i64 %lhs, i64 %rhs) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 5, v[0:1]
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 27, v3
-; GFX9-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX9-NEXT: v_or_b32_e32 v0, v2, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshl_i64_5:
@@ -5121,7 +5112,7 @@ define i64 @v_fshl_i64_5(i64 %lhs, i64 %rhs) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 5, v[0:1]
; GFX10-NEXT: v_lshrrev_b32_e32 v2, 27, v3
-; GFX10-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX10-NEXT: v_or_b32_e32 v0, v2, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshl_i64_5:
@@ -5130,7 +5121,7 @@ define i64 @v_fshl_i64_5(i64 %lhs, i64 %rhs) {
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 5, v[0:1]
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 27, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_or_b32_e32 v0, v2, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call i64 @llvm.fshl.i64(i64 %lhs, i64 %rhs, i64 5)
ret i64 %result
@@ -6876,57 +6867,56 @@ define amdgpu_ps i128 @s_fshl_i128_65(i128 inreg %lhs, i128 inreg %rhs) {
; GFX6-LABEL: s_fshl_i128_65:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_lshl_b64 s[2:3], s[0:1], 1
-; GFX6-NEXT: s_lshr_b32 s4, s5, 31
-; GFX6-NEXT: s_mov_b32 s5, 0
-; GFX6-NEXT: s_lshl_b64 s[0:1], s[6:7], 1
-; GFX6-NEXT: s_or_b64 s[0:1], s[4:5], s[0:1]
+; GFX6-NEXT: s_lshr_b32 s0, s5, 31
+; GFX6-NEXT: s_mov_b32 s1, 0
+; GFX6-NEXT: s_lshl_b64 s[4:5], s[6:7], 1
+; GFX6-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX6-NEXT: s_lshr_b32 s4, s7, 31
-; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX6-NEXT: s_or_b32 s2, s4, s2
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshl_i128_65:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_lshl_b64 s[2:3], s[0:1], 1
-; GFX8-NEXT: s_lshr_b32 s4, s5, 31
-; GFX8-NEXT: s_mov_b32 s5, 0
-; GFX8-NEXT: s_lshl_b64 s[0:1], s[6:7], 1
-; GFX8-NEXT: s_or_b64 s[0:1], s[4:5], s[0:1]
+; GFX8-NEXT: s_lshr_b32 s0, s5, 31
+; GFX8-NEXT: s_mov_b32 s1, 0
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 1
+; GFX8-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX8-NEXT: s_lshr_b32 s4, s7, 31
-; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX8-NEXT: s_or_b32 s2, s4, s2
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshl_i128_65:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshl_b64 s[2:3], s[0:1], 1
-; GFX9-NEXT: s_lshr_b32 s4, s5, 31
-; GFX9-NEXT: s_mov_b32 s5, 0
-; GFX9-NEXT: s_lshl_b64 s[0:1], s[6:7], 1
-; GFX9-NEXT: s_or_b64 s[0:1], s[4:5], s[0:1]
+; GFX9-NEXT: s_lshr_b32 s0, s5, 31
+; GFX9-NEXT: s_mov_b32 s1, 0
+; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 1
+; GFX9-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX9-NEXT: s_lshr_b32 s4, s7, 31
-; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX9-NEXT: s_or_b32 s2, s4, s2
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshl_i128_65:
; GFX10: ; %bb.0:
-; GFX10-NEXT: s_lshr_b32 s2, s5, 31
-; GFX10-NEXT: s_mov_b32 s3, 0
+; GFX10-NEXT: s_lshl_b64 s[2:3], s[0:1], 1
+; GFX10-NEXT: s_lshr_b32 s0, s5, 31
+; GFX10-NEXT: s_mov_b32 s1, 0
; GFX10-NEXT: s_lshl_b64 s[4:5], s[6:7], 1
-; GFX10-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
-; GFX10-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; GFX10-NEXT: s_lshr_b32 s2, s7, 31
-; GFX10-NEXT: s_or_b64 s[2:3], s[8:9], s[2:3]
+; GFX10-NEXT: s_lshr_b32 s6, s7, 31
+; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX10-NEXT: s_or_b32 s2, s6, s2
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshl_i128_65:
; GFX11: ; %bb.0:
-; GFX11-NEXT: s_lshr_b32 s2, s5, 31
-; GFX11-NEXT: s_mov_b32 s3, 0
+; GFX11-NEXT: s_lshl_b64 s[2:3], s[0:1], 1
+; GFX11-NEXT: s_lshr_b32 s0, s5, 31
+; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_lshl_b64 s[4:5], s[6:7], 1
-; GFX11-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
-; GFX11-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; GFX11-NEXT: s_lshr_b32 s2, s7, 31
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[2:3], s[8:9], s[2:3]
+; GFX11-NEXT: s_lshr_b32 s6, s7, 31
+; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX11-NEXT: s_or_b32 s2, s6, s2
; GFX11-NEXT: ; return to shader part epilog
%result = call i128 @llvm.fshl.i128(i128 %lhs, i128 %rhs, i128 65)
ret i128 %result
@@ -6941,7 +6931,7 @@ define i128 @v_fshl_i128_65(i128 %lhs, i128 %rhs) {
; GFX6-NEXT: v_lshrrev_b32_e32 v4, 31, v5
; GFX6-NEXT: v_or_b32_e32 v0, v4, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v4, 31, v7
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX6-NEXT: v_or_b32_e32 v2, v4, v2
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshl_i128_65:
@@ -6952,7 +6942,7 @@ define i128 @v_fshl_i128_65(i128 %lhs, i128 %rhs) {
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 31, v5
; GFX8-NEXT: v_or_b32_e32 v0, v4, v0
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 31, v7
-; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshl_i128_65:
@@ -6963,7 +6953,7 @@ define i128 @v_fshl_i128_65(i128 %lhs, i128 %rhs) {
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 31, v5
; GFX9-NEXT: v_or_b32_e32 v0, v4, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 31, v7
-; GFX9-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX9-NEXT: v_or_b32_e32 v2, v4, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshl_i128_65:
@@ -6974,7 +6964,7 @@ define i128 @v_fshl_i128_65(i128 %lhs, i128 %rhs) {
; GFX10-NEXT: v_lshrrev_b32_e32 v4, 31, v5
; GFX10-NEXT: v_lshrrev_b32_e32 v5, 31, v7
; GFX10-NEXT: v_or_b32_e32 v0, v4, v0
-; GFX10-NEXT: v_or_b32_e32 v2, v2, v5
+; GFX10-NEXT: v_or_b32_e32 v2, v5, v2
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshl_i128_65:
@@ -6986,7 +6976,7 @@ define i128 @v_fshl_i128_65(i128 %lhs, i128 %rhs) {
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 31, v7
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_or_b32_e32 v0, v4, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v5
+; GFX11-NEXT: v_or_b32_e32 v2, v5, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call i128 @llvm.fshl.i128(i128 %lhs, i128 %rhs, i128 65)
ret i128 %result
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
index 238cc06fc7f7c..a41cdee966f99 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
@@ -5007,20 +5007,13 @@ define amdgpu_ps i64 @s_fshr_i64_32(i64 inreg %lhs, i64 inreg %rhs) {
; GCN-LABEL: s_fshr_i64_32:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b32 s1, s0
-; GCN-NEXT: s_mov_b32 s0, 0
-; GCN-NEXT: s_mov_b32 s2, s3
-; GCN-NEXT: s_mov_b32 s3, s0
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_mov_b32 s0, s3
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i64_32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_mov_b32 s1, s0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_mov_b32 s2, s3
-; GFX11-NEXT: s_mov_b32 s3, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-NEXT: s_mov_b32 s0, s3
; GFX11-NEXT: ; return to shader part epilog
%result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 32)
ret i64 %result
@@ -5031,17 +5024,15 @@ define amdgpu_ps i64 @s_fshr_i64_48(i64 inreg %lhs, i64 inreg %rhs) {
; GCN: ; %bb.0:
; GCN-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
; GCN-NEXT: s_lshr_b32 s2, s3, 16
-; GCN-NEXT: s_mov_b32 s3, 0
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_or_b32 s0, s2, s0
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i64_48:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
; GFX11-NEXT: s_lshr_b32 s2, s3, 16
-; GFX11-NEXT: s_mov_b32 s3, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-NEXT: s_or_b32 s0, s2, s0
; GFX11-NEXT: ; return to shader part epilog
%result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 48)
ret i64 %result
@@ -5189,28 +5180,28 @@ define i64 @v_fshr_i64_48(i64 %lhs, i64 %rhs) {
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 16
; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX6-NEXT: v_or_b32_e32 v0, v2, v0
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_i64_48:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
-; GFX8-NEXT: v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_i64_48:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_i64_48:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
-; GFX10-NEXT: v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_i64_48:
@@ -5219,7 +5210,7 @@ define i64 @v_fshr_i64_48(i64 %lhs, i64 %rhs) {
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX11-NEXT: v_or_b32_e32 v0, v2, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 48)
ret i64 %result
@@ -5606,34 +5597,33 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX6-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX6-NEXT: s_lshr_b32 s0, s1, 31
-; GFX6-NEXT: s_mov_b32 s1, 0
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX6-NEXT: s_andn2_b32 s2, 0x7f, s8
+; GFX6-NEXT: s_or_b32 s2, s0, s2
+; GFX6-NEXT: s_andn2_b32 s0, 0x7f, s8
; GFX6-NEXT: s_not_b32 s9, s8
-; GFX6-NEXT: s_sub_i32 s16, s2, 64
-; GFX6-NEXT: s_sub_i32 s12, 64, s2
-; GFX6-NEXT: s_cmp_lt_u32 s2, 64
+; GFX6-NEXT: s_sub_i32 s16, s0, 64
+; GFX6-NEXT: s_sub_i32 s12, 64, s0
+; GFX6-NEXT: s_cmp_lt_u32 s0, 64
; GFX6-NEXT: s_cselect_b32 s17, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s2, 0
+; GFX6-NEXT: s_cmp_eq_u32 s0, 0
; GFX6-NEXT: s_cselect_b32 s18, 1, 0
; GFX6-NEXT: s_lshr_b64 s[12:13], s[10:11], s12
-; GFX6-NEXT: s_lshl_b64 s[14:15], s[0:1], s9
-; GFX6-NEXT: s_lshl_b64 s[2:3], s[10:11], s9
+; GFX6-NEXT: s_lshl_b64 s[14:15], s[2:3], s9
+; GFX6-NEXT: s_lshl_b64 s[0:1], s[10:11], s9
; GFX6-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GFX6-NEXT: s_lshl_b64 s[10:11], s[10:11], s16
; GFX6-NEXT: s_cmp_lg_u32 s17, 0
-; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
+; GFX6-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
; GFX6-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX6-NEXT: s_cmp_lg_u32 s18, 0
-; GFX6-NEXT: s_cselect_b64 s[10:11], s[0:1], s[10:11]
-; GFX6-NEXT: s_and_b32 s0, s8, 0x7f
-; GFX6-NEXT: s_sub_i32 s14, s0, 64
-; GFX6-NEXT: s_sub_i32 s12, 64, s0
-; GFX6-NEXT: s_cmp_lt_u32 s0, 64
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX6-NEXT: s_and_b32 s9, s8, 0x7f
+; GFX6-NEXT: s_sub_i32 s14, s9, 64
+; GFX6-NEXT: s_sub_i32 s12, 64, s9
+; GFX6-NEXT: s_cmp_lt_u32 s9, 64
; GFX6-NEXT: s_cselect_b32 s15, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s0, 0
+; GFX6-NEXT: s_cmp_eq_u32 s9, 0
; GFX6-NEXT: s_cselect_b32 s16, 1, 0
-; GFX6-NEXT: s_lshr_b64 s[0:1], s[6:7], s8
+; GFX6-NEXT: s_lshr_b64 s[10:11], s[6:7], s8
; GFX6-NEXT: s_lshr_b64 s[8:9], s[4:5], s8
; GFX6-NEXT: s_lshl_b64 s[12:13], s[6:7], s12
; GFX6-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
@@ -5643,9 +5633,9 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX6-NEXT: s_cmp_lg_u32 s16, 0
; GFX6-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
; GFX6-NEXT: s_cmp_lg_u32 s15, 0
-; GFX6-NEXT: s_cselect_b64 s[6:7], s[0:1], 0
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; GFX6-NEXT: s_or_b64 s[2:3], s[10:11], s[6:7]
+; GFX6-NEXT: s_cselect_b64 s[6:7], s[10:11], 0
+; GFX6-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_i128:
@@ -5653,34 +5643,33 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX8-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX8-NEXT: s_lshr_b32 s0, s1, 31
-; GFX8-NEXT: s_mov_b32 s1, 0
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX8-NEXT: s_andn2_b32 s2, 0x7f, s8
+; GFX8-NEXT: s_or_b32 s2, s0, s2
+; GFX8-NEXT: s_andn2_b32 s0, 0x7f, s8
; GFX8-NEXT: s_not_b32 s9, s8
-; GFX8-NEXT: s_sub_i32 s16, s2, 64
-; GFX8-NEXT: s_sub_i32 s12, 64, s2
-; GFX8-NEXT: s_cmp_lt_u32 s2, 64
+; GFX8-NEXT: s_sub_i32 s16, s0, 64
+; GFX8-NEXT: s_sub_i32 s12, 64, s0
+; GFX8-NEXT: s_cmp_lt_u32 s0, 64
; GFX8-NEXT: s_cselect_b32 s17, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s2, 0
+; GFX8-NEXT: s_cmp_eq_u32 s0, 0
; GFX8-NEXT: s_cselect_b32 s18, 1, 0
; GFX8-NEXT: s_lshr_b64 s[12:13], s[10:11], s12
-; GFX8-NEXT: s_lshl_b64 s[14:15], s[0:1], s9
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[10:11], s9
+; GFX8-NEXT: s_lshl_b64 s[14:15], s[2:3], s9
+; GFX8-NEXT: s_lshl_b64 s[0:1], s[10:11], s9
; GFX8-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GFX8-NEXT: s_lshl_b64 s[10:11], s[10:11], s16
; GFX8-NEXT: s_cmp_lg_u32 s17, 0
-; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
+; GFX8-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
; GFX8-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX8-NEXT: s_cmp_lg_u32 s18, 0
-; GFX8-NEXT: s_cselect_b64 s[10:11], s[0:1], s[10:11]
-; GFX8-NEXT: s_and_b32 s0, s8, 0x7f
-; GFX8-NEXT: s_sub_i32 s14, s0, 64
-; GFX8-NEXT: s_sub_i32 s12, 64, s0
-; GFX8-NEXT: s_cmp_lt_u32 s0, 64
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX8-NEXT: s_and_b32 s9, s8, 0x7f
+; GFX8-NEXT: s_sub_i32 s14, s9, 64
+; GFX8-NEXT: s_sub_i32 s12, 64, s9
+; GFX8-NEXT: s_cmp_lt_u32 s9, 64
; GFX8-NEXT: s_cselect_b32 s15, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s0, 0
+; GFX8-NEXT: s_cmp_eq_u32 s9, 0
; GFX8-NEXT: s_cselect_b32 s16, 1, 0
-; GFX8-NEXT: s_lshr_b64 s[0:1], s[6:7], s8
+; GFX8-NEXT: s_lshr_b64 s[10:11], s[6:7], s8
; GFX8-NEXT: s_lshr_b64 s[8:9], s[4:5], s8
; GFX8-NEXT: s_lshl_b64 s[12:13], s[6:7], s12
; GFX8-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
@@ -5690,9 +5679,9 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX8-NEXT: s_cmp_lg_u32 s16, 0
; GFX8-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
; GFX8-NEXT: s_cmp_lg_u32 s15, 0
-; GFX8-NEXT: s_cselect_b64 s[6:7], s[0:1], 0
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; GFX8-NEXT: s_or_b64 s[2:3], s[10:11], s[6:7]
+; GFX8-NEXT: s_cselect_b64 s[6:7], s[10:11], 0
+; GFX8-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_i128:
@@ -5700,34 +5689,33 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX9-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX9-NEXT: s_lshr_b32 s0, s1, 31
-; GFX9-NEXT: s_mov_b32 s1, 0
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX9-NEXT: s_andn2_b32 s2, 0x7f, s8
+; GFX9-NEXT: s_or_b32 s2, s0, s2
+; GFX9-NEXT: s_andn2_b32 s0, 0x7f, s8
; GFX9-NEXT: s_not_b32 s9, s8
-; GFX9-NEXT: s_sub_i32 s16, s2, 64
-; GFX9-NEXT: s_sub_i32 s12, 64, s2
-; GFX9-NEXT: s_cmp_lt_u32 s2, 64
+; GFX9-NEXT: s_sub_i32 s16, s0, 64
+; GFX9-NEXT: s_sub_i32 s12, 64, s0
+; GFX9-NEXT: s_cmp_lt_u32 s0, 64
; GFX9-NEXT: s_cselect_b32 s17, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s2, 0
+; GFX9-NEXT: s_cmp_eq_u32 s0, 0
; GFX9-NEXT: s_cselect_b32 s18, 1, 0
; GFX9-NEXT: s_lshr_b64 s[12:13], s[10:11], s12
-; GFX9-NEXT: s_lshl_b64 s[14:15], s[0:1], s9
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[10:11], s9
+; GFX9-NEXT: s_lshl_b64 s[14:15], s[2:3], s9
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[10:11], s9
; GFX9-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GFX9-NEXT: s_lshl_b64 s[10:11], s[10:11], s16
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
+; GFX9-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
; GFX9-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cselect_b64 s[10:11], s[0:1], s[10:11]
-; GFX9-NEXT: s_and_b32 s0, s8, 0x7f
-; GFX9-NEXT: s_sub_i32 s14, s0, 64
-; GFX9-NEXT: s_sub_i32 s12, 64, s0
-; GFX9-NEXT: s_cmp_lt_u32 s0, 64
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX9-NEXT: s_and_b32 s9, s8, 0x7f
+; GFX9-NEXT: s_sub_i32 s14, s9, 64
+; GFX9-NEXT: s_sub_i32 s12, 64, s9
+; GFX9-NEXT: s_cmp_lt_u32 s9, 64
; GFX9-NEXT: s_cselect_b32 s15, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s0, 0
+; GFX9-NEXT: s_cmp_eq_u32 s9, 0
; GFX9-NEXT: s_cselect_b32 s16, 1, 0
-; GFX9-NEXT: s_lshr_b64 s[0:1], s[6:7], s8
+; GFX9-NEXT: s_lshr_b64 s[10:11], s[6:7], s8
; GFX9-NEXT: s_lshr_b64 s[8:9], s[4:5], s8
; GFX9-NEXT: s_lshl_b64 s[12:13], s[6:7], s12
; GFX9-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
@@ -5737,19 +5725,18 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
; GFX9-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
; GFX9-NEXT: s_cmp_lg_u32 s15, 0
-; GFX9-NEXT: s_cselect_b64 s[6:7], s[0:1], 0
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; GFX9-NEXT: s_or_b64 s[2:3], s[10:11], s[6:7]
+; GFX9-NEXT: s_cselect_b64 s[6:7], s[10:11], 0
+; GFX9-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_i128:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX10-NEXT: s_lshr_b32 s10, s1, 31
-; GFX10-NEXT: s_mov_b32 s11, 0
-; GFX10-NEXT: s_andn2_b32 s9, 0x7f, s8
+; GFX10-NEXT: s_lshr_b32 s9, s1, 31
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
+; GFX10-NEXT: s_or_b32 s2, s9, s2
+; GFX10-NEXT: s_andn2_b32 s9, 0x7f, s8
; GFX10-NEXT: s_not_b32 s14, s8
; GFX10-NEXT: s_sub_i32 s16, s9, 64
; GFX10-NEXT: s_sub_i32 s10, 64, s9
@@ -5792,11 +5779,10 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX11-LABEL: s_fshr_i128:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX11-NEXT: s_lshr_b32 s10, s1, 31
-; GFX11-NEXT: s_mov_b32 s11, 0
-; GFX11-NEXT: s_and_not1_b32 s9, 0x7f, s8
+; GFX11-NEXT: s_lshr_b32 s9, s1, 31
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
+; GFX11-NEXT: s_or_b32 s2, s9, s2
+; GFX11-NEXT: s_and_not1_b32 s9, 0x7f, s8
; GFX11-NEXT: s_not_b32 s14, s8
; GFX11-NEXT: s_sub_i32 s16, s9, 64
; GFX11-NEXT: s_sub_i32 s10, 64, s9
@@ -5847,7 +5833,7 @@ define i128 @v_fshr_i128(i128 %lhs, i128 %rhs, i128 %amt) {
; GFX6-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GFX6-NEXT: v_lshl_b64 v[9:10], v[0:1], 1
; GFX6-NEXT: v_lshrrev_b32_e32 v0, 31, v1
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
; GFX6-NEXT: v_not_b32_e32 v0, v8
; GFX6-NEXT: v_and_b32_e32 v15, 0x7f, v0
; GFX6-NEXT: v_sub_i32_e32 v0, vcc, 64, v15
@@ -5896,7 +5882,7 @@ define i128 @v_fshr_i128(i128 %lhs, i128 %rhs, i128 %amt) {
; GFX8-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; GFX8-NEXT: v_lshlrev_b64 v[9:10], 1, v[0:1]
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 31, v1
-; GFX8-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
; GFX8-NEXT: v_not_b32_e32 v0, v8
; GFX8-NEXT: v_and_b32_e32 v15, 0x7f, v0
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, 64, v15
@@ -5945,7 +5931,7 @@ define i128 @v_fshr_i128(i128 %lhs, i128 %rhs, i128 %amt) {
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; GFX9-NEXT: v_lshlrev_b64 v[9:10], 1, v[0:1]
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 31, v1
-; GFX9-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
; GFX9-NEXT: v_not_b32_e32 v0, v8
; GFX9-NEXT: v_and_b32_e32 v15, 0x7f, v0
; GFX9-NEXT: v_sub_u32_e32 v0, 64, v15
@@ -5996,7 +5982,7 @@ define i128 @v_fshr_i128(i128 %lhs, i128 %rhs, i128 %amt) {
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX10-NEXT: v_and_b32_e32 v21, 0x7f, v8
; GFX10-NEXT: v_and_b32_e32 v20, 0x7f, v9
-; GFX10-NEXT: v_or_b32_e32 v2, v2, v10
+; GFX10-NEXT: v_or_b32_e32 v2, v10, v2
; GFX10-NEXT: v_sub_nc_u32_e32 v16, 64, v21
; GFX10-NEXT: v_sub_nc_u32_e32 v12, 64, v20
; GFX10-NEXT: v_add_nc_u32_e32 v14, 0xffffffc0, v20
@@ -6044,7 +6030,7 @@ define i128 @v_fshr_i128(i128 %lhs, i128 %rhs, i128 %amt) {
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_and_b32_e32 v20, 0x7f, v9
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v10
+; GFX11-NEXT: v_or_b32_e32 v2, v10, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_sub_nc_u32_e32 v12, 64, v20
; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v20
@@ -6103,13 +6089,12 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX6-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX6-NEXT: s_lshr_b32 s0, s1, 31
-; GFX6-NEXT: s_mov_b32 s1, 0
; GFX6-NEXT: v_and_b32_e32 v7, 0x7f, v1
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX6-NEXT: s_or_b32 s2, s0, s2
; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 64, v7
; GFX6-NEXT: v_not_b32_e32 v8, 63
; GFX6-NEXT: v_lshr_b64 v[1:2], s[8:9], v1
-; GFX6-NEXT: v_lshl_b64 v[3:4], s[0:1], v7
+; GFX6-NEXT: v_lshl_b64 v[3:4], s[2:3], v7
; GFX6-NEXT: v_add_i32_e32 v9, vcc, v7, v8
; GFX6-NEXT: v_lshl_b64 v[5:6], s[8:9], v7
; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
@@ -6120,8 +6105,8 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX6-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
-; GFX6-NEXT: v_mov_b32_e32 v3, s0
-; GFX6-NEXT: v_mov_b32_e32 v4, s1
+; GFX6-NEXT: v_mov_b32_e32 v3, s2
+; GFX6-NEXT: v_mov_b32_e32 v4, s3
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX6-NEXT: v_and_b32_e32 v11, 0x7f, v0
; GFX6-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
@@ -6156,13 +6141,12 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX8-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX8-NEXT: s_lshr_b32 s0, s1, 31
-; GFX8-NEXT: s_mov_b32 s1, 0
; GFX8-NEXT: v_and_b32_e32 v7, 0x7f, v1
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX8-NEXT: s_or_b32 s2, s0, s2
; GFX8-NEXT: v_sub_u32_e32 v1, vcc, 64, v7
; GFX8-NEXT: v_not_b32_e32 v8, 63
; GFX8-NEXT: v_lshrrev_b64 v[1:2], v1, s[8:9]
-; GFX8-NEXT: v_lshlrev_b64 v[3:4], v7, s[0:1]
+; GFX8-NEXT: v_lshlrev_b64 v[3:4], v7, s[2:3]
; GFX8-NEXT: v_add_u32_e32 v9, vcc, v7, v8
; GFX8-NEXT: v_lshlrev_b64 v[5:6], v7, s[8:9]
; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
@@ -6173,8 +6157,8 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX8-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
-; GFX8-NEXT: v_mov_b32_e32 v3, s0
-; GFX8-NEXT: v_mov_b32_e32 v4, s1
+; GFX8-NEXT: v_mov_b32_e32 v3, s2
+; GFX8-NEXT: v_mov_b32_e32 v4, s3
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8-NEXT: v_and_b32_e32 v11, 0x7f, v0
; GFX8-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
@@ -6209,12 +6193,11 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX9-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX9-NEXT: s_lshr_b32 s0, s1, 31
-; GFX9-NEXT: s_mov_b32 s1, 0
; GFX9-NEXT: v_and_b32_e32 v7, 0x7f, v1
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX9-NEXT: s_or_b32 s2, s0, s2
; GFX9-NEXT: v_sub_u32_e32 v1, 64, v7
; GFX9-NEXT: v_lshrrev_b64 v[1:2], v1, s[8:9]
-; GFX9-NEXT: v_lshlrev_b64 v[3:4], v7, s[0:1]
+; GFX9-NEXT: v_lshlrev_b64 v[3:4], v7, s[2:3]
; GFX9-NEXT: v_add_u32_e32 v8, 0xffffffc0, v7
; GFX9-NEXT: v_lshlrev_b64 v[5:6], v7, s[8:9]
; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
@@ -6225,10 +6208,10 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX9-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
-; GFX9-NEXT: v_mov_b32_e32 v4, s1
+; GFX9-NEXT: v_mov_b32_e32 v4, s3
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9-NEXT: v_and_b32_e32 v10, 0x7f, v0
-; GFX9-NEXT: v_mov_b32_e32 v3, s0
+; GFX9-NEXT: v_mov_b32_e32 v3, s2
; GFX9-NEXT: v_cndmask_b32_e32 v9, v2, v4, vcc
; GFX9-NEXT: v_sub_u32_e32 v2, 64, v10
; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
@@ -6258,100 +6241,101 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX10-LABEL: v_fshr_i128_ssv:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_not_b32_e32 v1, v0
-; GFX10-NEXT: s_mov_b32 s9, 0
-; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX10-NEXT: s_lshr_b32 s8, s1, 31
-; GFX10-NEXT: v_and_b32_e32 v13, 0x7f, v0
-; GFX10-NEXT: v_and_b32_e32 v12, 0x7f, v1
-; GFX10-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
-; GFX10-NEXT: s_or_b64 s[8:9], s[2:3], s[8:9]
-; GFX10-NEXT: v_sub_nc_u32_e32 v10, 64, v13
-; GFX10-NEXT: v_sub_nc_u32_e32 v2, 64, v12
-; GFX10-NEXT: v_add_nc_u32_e32 v6, 0xffffffc0, v12
-; GFX10-NEXT: v_lshlrev_b64 v[0:1], v12, s[8:9]
-; GFX10-NEXT: v_add_nc_u32_e32 v14, 0xffffffc0, v13
-; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v12
-; GFX10-NEXT: v_lshrrev_b64 v[2:3], v2, s[10:11]
-; GFX10-NEXT: v_lshlrev_b64 v[6:7], v6, s[10:11]
-; GFX10-NEXT: v_lshrrev_b64 v[8:9], v13, s[4:5]
+; GFX10-NEXT: v_and_b32_e32 v15, 0x7f, v0
+; GFX10-NEXT: s_lshl_b64 s[8:9], s[2:3], 1
+; GFX10-NEXT: s_lshr_b32 s2, s1, 31
+; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
+; GFX10-NEXT: v_and_b32_e32 v14, 0x7f, v1
+; GFX10-NEXT: s_or_b32 s8, s2, s8
+; GFX10-NEXT: v_sub_nc_u32_e32 v10, 64, v15
+; GFX10-NEXT: v_add_nc_u32_e32 v12, 0xffffffc0, v15
+; GFX10-NEXT: v_lshrrev_b64 v[8:9], v15, s[4:5]
+; GFX10-NEXT: v_sub_nc_u32_e32 v4, 64, v14
+; GFX10-NEXT: v_add_nc_u32_e32 v6, 0xffffffc0, v14
+; GFX10-NEXT: v_lshlrev_b64 v[2:3], v14, s[8:9]
; GFX10-NEXT: v_lshlrev_b64 v[10:11], v10, s[6:7]
-; GFX10-NEXT: v_cmp_gt_u32_e64 s1, 64, v13
-; GFX10-NEXT: v_lshlrev_b64 v[4:5], v12, s[10:11]
-; GFX10-NEXT: v_or_b32_e32 v0, v2, v0
-; GFX10-NEXT: v_or_b32_e32 v2, v3, v1
-; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v12
-; GFX10-NEXT: v_or_b32_e32 v3, v8, v10
-; GFX10-NEXT: v_or_b32_e32 v8, v9, v11
-; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc_lo
-; GFX10-NEXT: v_lshrrev_b64 v[0:1], v14, s[6:7]
-; GFX10-NEXT: v_cndmask_b32_e32 v7, v7, v2, vcc_lo
-; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 0, v13
-; GFX10-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, s8, s0
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v3, s1
-; GFX10-NEXT: v_lshrrev_b64 v[2:3], v13, s[6:7]
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v8, s1
-; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, s9, s0
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s4, s2
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s5, s2
+; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v14
+; GFX10-NEXT: v_lshrrev_b64 v[4:5], v4, s[0:1]
+; GFX10-NEXT: v_lshlrev_b64 v[6:7], v6, s[0:1]
+; GFX10-NEXT: v_lshrrev_b64 v[12:13], v12, s[6:7]
+; GFX10-NEXT: v_lshlrev_b64 v[0:1], v14, s[0:1]
+; GFX10-NEXT: v_cmp_gt_u32_e64 s1, 64, v15
+; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v14
+; GFX10-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX10-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX10-NEXT: v_or_b32_e32 v5, v9, v11
+; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 0, v15
+; GFX10-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v6, v2, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v2, v8, v10
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v7, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX10-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, s8, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v12, v2, s1
+; GFX10-NEXT: v_lshrrev_b64 v[2:3], v15, s[6:7]
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, s9, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, s5, s2
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, s4, s2
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, v2, s1
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, v3, s1
-; GFX10-NEXT: v_or_b32_e32 v0, v4, v0
-; GFX10-NEXT: v_or_b32_e32 v1, v5, v1
-; GFX10-NEXT: v_or_b32_e32 v2, v6, v2
-; GFX10-NEXT: v_or_b32_e32 v3, v7, v3
+; GFX10-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v7
+; GFX10-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX10-NEXT: v_or_b32_e32 v3, v6, v3
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_i128_ssv:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_not_b32_e32 v1, v0
-; GFX11-NEXT: s_mov_b32 s9, 0
-; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX11-NEXT: s_lshr_b32 s8, s1, 31
-; GFX11-NEXT: v_and_b32_e32 v13, 0x7f, v0
-; GFX11-NEXT: v_and_b32_e32 v12, 0x7f, v1
-; GFX11-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
-; GFX11-NEXT: s_or_b64 s[8:9], s[2:3], s[8:9]
+; GFX11-NEXT: s_lshl_b64 s[8:9], s[2:3], 1
+; GFX11-NEXT: s_lshr_b32 s2, s1, 31
+; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
+; GFX11-NEXT: s_or_b32 s8, s2, s8
+; GFX11-NEXT: v_and_b32_e32 v14, 0x7f, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_sub_nc_u32_e32 v4, 64, v14
+; GFX11-NEXT: v_lshlrev_b64 v[2:3], v14, s[8:9]
+; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v14
+; GFX11-NEXT: v_add_nc_u32_e32 v6, 0xffffffc0, v14
+; GFX11-NEXT: v_lshrrev_b64 v[4:5], v4, s[0:1]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_sub_nc_u32_e32 v10, 64, v13
-; GFX11-NEXT: v_sub_nc_u32_e32 v2, 64, v12
-; GFX11-NEXT: v_lshlrev_b64 v[0:1], v12, s[8:9]
-; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v6, 0xffffffc0, v12
-; GFX11-NEXT: v_add_nc_u32_e32 v14, 0xffffffc0, v13
-; GFX11-NEXT: v_lshrrev_b64 v[2:3], v2, s[10:11]
-; GFX11-NEXT: v_lshrrev_b64 v[8:9], v13, s[4:5]
+; GFX11-NEXT: v_lshlrev_b64 v[6:7], v6, s[0:1]
+; GFX11-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX11-NEXT: v_and_b32_e32 v15, 0x7f, v0
+; GFX11-NEXT: v_lshlrev_b64 v[0:1], v14, s[0:1]
+; GFX11-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 0, v14
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v6, v2, vcc_lo
+; GFX11-NEXT: v_sub_nc_u32_e32 v10, 64, v15
+; GFX11-NEXT: v_add_nc_u32_e32 v12, 0xffffffc0, v15
+; GFX11-NEXT: v_lshrrev_b64 v[8:9], v15, s[4:5]
+; GFX11-NEXT: v_cmp_gt_u32_e64 s1, 64, v15
+; GFX11-NEXT: v_cndmask_b32_e32 v6, v7, v3, vcc_lo
; GFX11-NEXT: v_lshlrev_b64 v[10:11], v10, s[6:7]
-; GFX11-NEXT: v_lshlrev_b64 v[6:7], v6, s[10:11]
-; GFX11-NEXT: v_cmp_gt_u32_e64 s1, 64, v13
-; GFX11-NEXT: v_cmp_eq_u32_e64 s2, 0, v13
-; GFX11-NEXT: v_or_b32_e32 v0, v2, v0
-; GFX11-NEXT: v_or_b32_e32 v2, v3, v1
-; GFX11-NEXT: v_or_b32_e32 v3, v8, v10
-; GFX11-NEXT: v_or_b32_e32 v8, v9, v11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc_lo
-; GFX11-NEXT: v_lshrrev_b64 v[0:1], v14, s[6:7]
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v7, v2, vcc_lo
-; GFX11-NEXT: v_lshlrev_b64 v[4:5], v12, s[10:11]
-; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 0, v12
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, v3, s1
-; GFX11-NEXT: v_lshrrev_b64 v[2:3], v13, s[6:7]
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, v8, s1
-; GFX11-NEXT: v_dual_cndmask_b32 v4, 0, v4 :: v_dual_cndmask_b32 v5, 0, v5
-; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, s8, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, s9, s0
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s4, s2
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s5, s2
+; GFX11-NEXT: v_lshrrev_b64 v[12:13], v12, s[6:7]
+; GFX11-NEXT: v_cmp_eq_u32_e64 s2, 0, v15
+; GFX11-NEXT: v_dual_cndmask_b32 v0, 0, v0 :: v_dual_cndmask_b32 v1, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v4, v4, s8, s0
+; GFX11-NEXT: v_or_b32_e32 v2, v8, v10
+; GFX11-NEXT: v_or_b32_e32 v5, v9, v11
+; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, s9, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v12, v2, s1
+; GFX11-NEXT: v_lshrrev_b64 v[2:3], v15, s[6:7]
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v13, v5, s1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v7, v7, s4, s2
+; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, s5, s2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, v2, s1
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, v3, s1
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v7
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_or_b32_e32 v0, v4, v0
-; GFX11-NEXT: v_or_b32_e32 v1, v5, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_or_b32_e32 v2, v6, v2
-; GFX11-NEXT: v_or_b32_e32 v3, v7, v3
+; GFX11-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX11-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_or_b32_e32 v3, v6, v3
; GFX11-NEXT: ; return to shader part epilog
%result = call i128 @llvm.fshr.i128(i128 %lhs, i128 %rhs, i128 %amt)
%cast.result = bitcast i128 %result to <4 x float>
@@ -6364,26 +6348,25 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX6-NEXT: s_lshl_b64 s[6:7], s[0:1], 1
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX6-NEXT: s_lshr_b32 s0, s1, 31
-; GFX6-NEXT: s_mov_b32 s1, 0
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX6-NEXT: s_andn2_b32 s2, 0x7f, s4
+; GFX6-NEXT: s_or_b32 s2, s0, s2
+; GFX6-NEXT: s_andn2_b32 s0, 0x7f, s4
; GFX6-NEXT: s_not_b32 s5, s4
-; GFX6-NEXT: s_sub_i32 s12, s2, 64
-; GFX6-NEXT: s_sub_i32 s8, 64, s2
-; GFX6-NEXT: s_cmp_lt_u32 s2, 64
+; GFX6-NEXT: s_sub_i32 s12, s0, 64
+; GFX6-NEXT: s_sub_i32 s8, 64, s0
+; GFX6-NEXT: s_cmp_lt_u32 s0, 64
; GFX6-NEXT: s_cselect_b32 s13, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s2, 0
+; GFX6-NEXT: s_cmp_eq_u32 s0, 0
; GFX6-NEXT: s_cselect_b32 s14, 1, 0
; GFX6-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
-; GFX6-NEXT: s_lshl_b64 s[10:11], s[0:1], s5
-; GFX6-NEXT: s_lshl_b64 s[2:3], s[6:7], s5
+; GFX6-NEXT: s_lshl_b64 s[10:11], s[2:3], s5
+; GFX6-NEXT: s_lshl_b64 s[0:1], s[6:7], s5
; GFX6-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
; GFX6-NEXT: s_lshl_b64 s[6:7], s[6:7], s12
; GFX6-NEXT: s_cmp_lg_u32 s13, 0
-; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX6-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
+; GFX6-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
+; GFX6-NEXT: s_cselect_b64 s[0:1], s[8:9], s[6:7]
; GFX6-NEXT: s_cmp_lg_u32 s14, 0
-; GFX6-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX6-NEXT: s_and_b32 s0, s4, 0x7f
; GFX6-NEXT: s_sub_i32 s1, s0, 64
; GFX6-NEXT: s_sub_i32 s4, 64, s0
@@ -6392,14 +6375,14 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX6-NEXT: s_cmp_eq_u32 s0, 0
; GFX6-NEXT: v_lshr_b64 v[4:5], v[0:1], s0
; GFX6-NEXT: v_lshl_b64 v[6:7], v[2:3], s4
-; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_cselect_b32 s6, 1, 0
; GFX6-NEXT: v_lshr_b64 v[8:9], v[2:3], s0
; GFX6-NEXT: v_lshr_b64 v[2:3], v[2:3], s1
; GFX6-NEXT: s_and_b32 s0, 1, s5
; GFX6-NEXT: v_or_b32_e32 v4, v4, v6
; GFX6-NEXT: v_or_b32_e32 v5, v5, v7
; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
-; GFX6-NEXT: s_and_b32 s0, 1, s8
+; GFX6-NEXT: s_and_b32 s0, 1, s6
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
@@ -6407,10 +6390,10 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
; GFX6-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
-; GFX6-NEXT: v_or_b32_e32 v0, s2, v0
-; GFX6-NEXT: v_or_b32_e32 v1, s3, v1
-; GFX6-NEXT: v_or_b32_e32 v2, s6, v2
-; GFX6-NEXT: v_or_b32_e32 v3, s7, v3
+; GFX6-NEXT: v_or_b32_e32 v0, s10, v0
+; GFX6-NEXT: v_or_b32_e32 v1, s11, v1
+; GFX6-NEXT: v_or_b32_e32 v2, s2, v2
+; GFX6-NEXT: v_or_b32_e32 v3, s3, v3
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_i128_svs:
@@ -6418,26 +6401,25 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX8-NEXT: s_lshl_b64 s[6:7], s[0:1], 1
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX8-NEXT: s_lshr_b32 s0, s1, 31
-; GFX8-NEXT: s_mov_b32 s1, 0
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX8-NEXT: s_andn2_b32 s2, 0x7f, s4
+; GFX8-NEXT: s_or_b32 s2, s0, s2
+; GFX8-NEXT: s_andn2_b32 s0, 0x7f, s4
; GFX8-NEXT: s_not_b32 s5, s4
-; GFX8-NEXT: s_sub_i32 s12, s2, 64
-; GFX8-NEXT: s_sub_i32 s8, 64, s2
-; GFX8-NEXT: s_cmp_lt_u32 s2, 64
+; GFX8-NEXT: s_sub_i32 s12, s0, 64
+; GFX8-NEXT: s_sub_i32 s8, 64, s0
+; GFX8-NEXT: s_cmp_lt_u32 s0, 64
; GFX8-NEXT: s_cselect_b32 s13, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s2, 0
+; GFX8-NEXT: s_cmp_eq_u32 s0, 0
; GFX8-NEXT: s_cselect_b32 s14, 1, 0
; GFX8-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
-; GFX8-NEXT: s_lshl_b64 s[10:11], s[0:1], s5
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[6:7], s5
+; GFX8-NEXT: s_lshl_b64 s[10:11], s[2:3], s5
+; GFX8-NEXT: s_lshl_b64 s[0:1], s[6:7], s5
; GFX8-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], s12
; GFX8-NEXT: s_cmp_lg_u32 s13, 0
-; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX8-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
+; GFX8-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
+; GFX8-NEXT: s_cselect_b64 s[0:1], s[8:9], s[6:7]
; GFX8-NEXT: s_cmp_lg_u32 s14, 0
-; GFX8-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX8-NEXT: s_and_b32 s0, s4, 0x7f
; GFX8-NEXT: s_sub_i32 s1, s0, 64
; GFX8-NEXT: s_sub_i32 s4, 64, s0
@@ -6446,14 +6428,14 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX8-NEXT: s_cmp_eq_u32 s0, 0
; GFX8-NEXT: v_lshrrev_b64 v[4:5], s0, v[0:1]
; GFX8-NEXT: v_lshlrev_b64 v[6:7], s4, v[2:3]
-; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
; GFX8-NEXT: v_lshrrev_b64 v[8:9], s0, v[2:3]
; GFX8-NEXT: v_lshrrev_b64 v[2:3], s1, v[2:3]
; GFX8-NEXT: s_and_b32 s0, 1, s5
; GFX8-NEXT: v_or_b32_e32 v4, v4, v6
; GFX8-NEXT: v_or_b32_e32 v5, v5, v7
; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
-; GFX8-NEXT: s_and_b32 s0, 1, s8
+; GFX8-NEXT: s_and_b32 s0, 1, s6
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
@@ -6461,10 +6443,10 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
; GFX8-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
-; GFX8-NEXT: v_or_b32_e32 v0, s2, v0
-; GFX8-NEXT: v_or_b32_e32 v1, s3, v1
-; GFX8-NEXT: v_or_b32_e32 v2, s6, v2
-; GFX8-NEXT: v_or_b32_e32 v3, s7, v3
+; GFX8-NEXT: v_or_b32_e32 v0, s10, v0
+; GFX8-NEXT: v_or_b32_e32 v1, s11, v1
+; GFX8-NEXT: v_or_b32_e32 v2, s2, v2
+; GFX8-NEXT: v_or_b32_e32 v3, s3, v3
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i128_svs:
@@ -6472,26 +6454,25 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX9-NEXT: s_lshl_b64 s[6:7], s[0:1], 1
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX9-NEXT: s_lshr_b32 s0, s1, 31
-; GFX9-NEXT: s_mov_b32 s1, 0
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX9-NEXT: s_andn2_b32 s2, 0x7f, s4
+; GFX9-NEXT: s_or_b32 s2, s0, s2
+; GFX9-NEXT: s_andn2_b32 s0, 0x7f, s4
; GFX9-NEXT: s_not_b32 s5, s4
-; GFX9-NEXT: s_sub_i32 s12, s2, 64
-; GFX9-NEXT: s_sub_i32 s8, 64, s2
-; GFX9-NEXT: s_cmp_lt_u32 s2, 64
+; GFX9-NEXT: s_sub_i32 s12, s0, 64
+; GFX9-NEXT: s_sub_i32 s8, 64, s0
+; GFX9-NEXT: s_cmp_lt_u32 s0, 64
; GFX9-NEXT: s_cselect_b32 s13, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s2, 0
+; GFX9-NEXT: s_cmp_eq_u32 s0, 0
; GFX9-NEXT: s_cselect_b32 s14, 1, 0
; GFX9-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
-; GFX9-NEXT: s_lshl_b64 s[10:11], s[0:1], s5
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[6:7], s5
+; GFX9-NEXT: s_lshl_b64 s[10:11], s[2:3], s5
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[6:7], s5
; GFX9-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], s12
; GFX9-NEXT: s_cmp_lg_u32 s13, 0
-; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX9-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
+; GFX9-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
+; GFX9-NEXT: s_cselect_b64 s[0:1], s[8:9], s[6:7]
; GFX9-NEXT: s_cmp_lg_u32 s14, 0
-; GFX9-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX9-NEXT: s_and_b32 s0, s4, 0x7f
; GFX9-NEXT: s_sub_i32 s1, s0, 64
; GFX9-NEXT: s_sub_i32 s4, 64, s0
@@ -6500,14 +6481,14 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX9-NEXT: s_cmp_eq_u32 s0, 0
; GFX9-NEXT: v_lshrrev_b64 v[4:5], s0, v[0:1]
; GFX9-NEXT: v_lshlrev_b64 v[6:7], s4, v[2:3]
-; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
; GFX9-NEXT: v_lshrrev_b64 v[8:9], s0, v[2:3]
; GFX9-NEXT: v_lshrrev_b64 v[2:3], s1, v[2:3]
; GFX9-NEXT: s_and_b32 s0, 1, s5
; GFX9-NEXT: v_or_b32_e32 v4, v4, v6
; GFX9-NEXT: v_or_b32_e32 v5, v5, v7
; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
-; GFX9-NEXT: s_and_b32 s0, 1, s8
+; GFX9-NEXT: s_and_b32 s0, 1, s6
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
@@ -6515,20 +6496,19 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
-; GFX9-NEXT: v_or_b32_e32 v0, s2, v0
-; GFX9-NEXT: v_or_b32_e32 v1, s3, v1
-; GFX9-NEXT: v_or_b32_e32 v2, s6, v2
-; GFX9-NEXT: v_or_b32_e32 v3, s7, v3
+; GFX9-NEXT: v_or_b32_e32 v0, s10, v0
+; GFX9-NEXT: v_or_b32_e32 v1, s11, v1
+; GFX9-NEXT: v_or_b32_e32 v2, s2, v2
+; GFX9-NEXT: v_or_b32_e32 v3, s3, v3
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i128_svs:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX10-NEXT: s_lshr_b32 s6, s1, 31
-; GFX10-NEXT: s_mov_b32 s7, 0
-; GFX10-NEXT: s_andn2_b32 s5, 0x7f, s4
+; GFX10-NEXT: s_lshr_b32 s5, s1, 31
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; GFX10-NEXT: s_or_b32 s2, s5, s2
+; GFX10-NEXT: s_andn2_b32 s5, 0x7f, s4
; GFX10-NEXT: s_not_b32 s10, s4
; GFX10-NEXT: s_sub_i32 s12, s5, 64
; GFX10-NEXT: s_sub_i32 s6, 64, s5
@@ -6578,11 +6558,10 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX11-LABEL: v_fshr_i128_svs:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX11-NEXT: s_lshr_b32 s6, s1, 31
-; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_and_not1_b32 s5, 0x7f, s4
+; GFX11-NEXT: s_lshr_b32 s5, s1, 31
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; GFX11-NEXT: s_or_b32 s2, s5, s2
+; GFX11-NEXT: s_and_not1_b32 s5, 0x7f, s4
; GFX11-NEXT: s_not_b32 s10, s4
; GFX11-NEXT: s_sub_i32 s12, s5, 64
; GFX11-NEXT: s_sub_i32 s6, 64, s5
@@ -6643,7 +6622,7 @@ define amdgpu_ps <4 x float> @v_fshr_i128_vss(i128 %lhs, i128 inreg %rhs, i128 i
; GFX6-NEXT: v_lshl_b64 v[4:5], v[0:1], 1
; GFX6-NEXT: v_lshrrev_b32_e32 v0, 31, v1
; GFX6-NEXT: s_andn2_b32 s5, 0x7f, s4
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
; GFX6-NEXT: s_sub_i32 s6, s5, 64
; GFX6-NEXT: s_sub_i32 s7, 64, s5
; GFX6-NEXT: s_cmp_lt_u32 s5, 64
@@ -6696,7 +6675,7 @@ define amdgpu_ps <4 x float> @v_fshr_i128_vss(i128 %lhs, i128 inreg %rhs, i128 i
; GFX8-NEXT: v_lshlrev_b64 v[4:5], 1, v[0:1]
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 31, v1
; GFX8-NEXT: s_andn2_b32 s5, 0x7f, s4
-; GFX8-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
; GFX8-NEXT: s_sub_i32 s6, s5, 64
; GFX8-NEXT: s_sub_i32 s7, 64, s5
; GFX8-NEXT: s_cmp_lt_u32 s5, 64
@@ -6749,7 +6728,7 @@ define amdgpu_ps <4 x float> @v_fshr_i128_vss(i128 %lhs, i128 inreg %rhs, i128 i
; GFX9-NEXT: v_lshlrev_b64 v[4:5], 1, v[0:1]
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 31, v1
; GFX9-NEXT: s_andn2_b32 s5, 0x7f, s4
-; GFX9-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
; GFX9-NEXT: s_sub_i32 s6, s5, 64
; GFX9-NEXT: s_sub_i32 s7, 64, s5
; GFX9-NEXT: s_cmp_lt_u32 s5, 64
@@ -6803,7 +6782,7 @@ define amdgpu_ps <4 x float> @v_fshr_i128_vss(i128 %lhs, i128 inreg %rhs, i128 i
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX10-NEXT: s_andn2_b32 s5, 0x7f, s4
; GFX10-NEXT: s_sub_i32 s7, 64, s5
-; GFX10-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX10-NEXT: v_or_b32_e32 v2, v4, v2
; GFX10-NEXT: s_sub_i32 s6, s5, 64
; GFX10-NEXT: s_cmp_lt_u32 s5, 64
; GFX10-NEXT: v_lshrrev_b64 v[4:5], s7, v[0:1]
@@ -6857,7 +6836,7 @@ define amdgpu_ps <4 x float> @v_fshr_i128_vss(i128 %lhs, i128 inreg %rhs, i128 i
; GFX11-NEXT: s_and_not1_b32 s5, 0x7f, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: s_sub_i32 s7, 64, s5
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX11-NEXT: v_or_b32_e32 v2, v4, v2
; GFX11-NEXT: s_sub_i32 s6, s5, 64
; GFX11-NEXT: s_cmp_lt_u32 s5, 64
; GFX11-NEXT: v_lshrrev_b64 v[4:5], s7, v[0:1]
@@ -7033,81 +7012,80 @@ define i128 @v_fshr_i128_65(i128 %lhs, i128 %rhs) {
define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inreg %rhs, <2 x i128> inreg %amt) {
; GFX6-LABEL: s_fshr_v2i128:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX6-NEXT: s_lshr_b32 s22, s1, 31
-; GFX6-NEXT: s_mov_b32 s23, 0
; GFX6-NEXT: s_lshl_b64 s[18:19], s[0:1], 1
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[22:23]
-; GFX6-NEXT: s_andn2_b32 s2, 0x7f, s16
+; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
+; GFX6-NEXT: s_lshr_b32 s0, s1, 31
+; GFX6-NEXT: s_or_b32 s2, s0, s2
+; GFX6-NEXT: s_andn2_b32 s0, 0x7f, s16
; GFX6-NEXT: s_not_b32 s17, s16
-; GFX6-NEXT: s_sub_i32 s21, s2, 64
-; GFX6-NEXT: s_sub_i32 s22, 64, s2
-; GFX6-NEXT: s_cmp_lt_u32 s2, 64
-; GFX6-NEXT: s_cselect_b32 s28, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s2, 0
-; GFX6-NEXT: s_cselect_b32 s29, 1, 0
-; GFX6-NEXT: s_lshr_b64 s[24:25], s[18:19], s22
-; GFX6-NEXT: s_lshl_b64 s[26:27], s[0:1], s17
-; GFX6-NEXT: s_lshl_b64 s[2:3], s[18:19], s17
-; GFX6-NEXT: s_or_b64 s[24:25], s[24:25], s[26:27]
-; GFX6-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
-; GFX6-NEXT: s_cmp_lg_u32 s28, 0
-; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX6-NEXT: s_cselect_b64 s[18:19], s[24:25], s[18:19]
-; GFX6-NEXT: s_cmp_lg_u32 s29, 0
-; GFX6-NEXT: s_cselect_b64 s[18:19], s[0:1], s[18:19]
-; GFX6-NEXT: s_and_b32 s0, s16, 0x7f
; GFX6-NEXT: s_sub_i32 s21, s0, 64
; GFX6-NEXT: s_sub_i32 s22, 64, s0
; GFX6-NEXT: s_cmp_lt_u32 s0, 64
; GFX6-NEXT: s_cselect_b32 s26, 1, 0
; GFX6-NEXT: s_cmp_eq_u32 s0, 0
; GFX6-NEXT: s_cselect_b32 s27, 1, 0
-; GFX6-NEXT: s_lshr_b64 s[0:1], s[10:11], s16
+; GFX6-NEXT: s_lshr_b64 s[22:23], s[18:19], s22
+; GFX6-NEXT: s_lshl_b64 s[24:25], s[2:3], s17
+; GFX6-NEXT: s_lshl_b64 s[0:1], s[18:19], s17
+; GFX6-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
+; GFX6-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
+; GFX6-NEXT: s_cmp_lg_u32 s26, 0
+; GFX6-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
+; GFX6-NEXT: s_cselect_b64 s[18:19], s[22:23], s[18:19]
+; GFX6-NEXT: s_cmp_lg_u32 s27, 0
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], s[18:19]
+; GFX6-NEXT: s_and_b32 s17, s16, 0x7f
+; GFX6-NEXT: s_sub_i32 s21, s17, 64
+; GFX6-NEXT: s_sub_i32 s22, 64, s17
+; GFX6-NEXT: s_cmp_lt_u32 s17, 64
+; GFX6-NEXT: s_cselect_b32 s24, 1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s17, 0
+; GFX6-NEXT: s_cselect_b32 s25, 1, 0
+; GFX6-NEXT: s_lshr_b64 s[18:19], s[10:11], s16
; GFX6-NEXT: s_lshr_b64 s[16:17], s[8:9], s16
-; GFX6-NEXT: s_lshl_b64 s[24:25], s[10:11], s22
-; GFX6-NEXT: s_or_b64 s[16:17], s[16:17], s[24:25]
+; GFX6-NEXT: s_lshl_b64 s[22:23], s[10:11], s22
+; GFX6-NEXT: s_or_b64 s[16:17], s[16:17], s[22:23]
; GFX6-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
-; GFX6-NEXT: s_cmp_lg_u32 s26, 0
+; GFX6-NEXT: s_cmp_lg_u32 s24, 0
; GFX6-NEXT: s_cselect_b64 s[10:11], s[16:17], s[10:11]
-; GFX6-NEXT: s_cmp_lg_u32 s27, 0
+; GFX6-NEXT: s_cmp_lg_u32 s25, 0
; GFX6-NEXT: s_cselect_b64 s[8:9], s[8:9], s[10:11]
-; GFX6-NEXT: s_cmp_lg_u32 s26, 0
-; GFX6-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
-; GFX6-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
-; GFX6-NEXT: s_lshr_b32 s22, s5, 31
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
+; GFX6-NEXT: s_cmp_lg_u32 s24, 0
+; GFX6-NEXT: s_cselect_b64 s[10:11], s[18:19], 0
+; GFX6-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9]
; GFX6-NEXT: s_lshl_b64 s[8:9], s[4:5], 1
-; GFX6-NEXT: s_or_b64 s[4:5], s[6:7], s[22:23]
-; GFX6-NEXT: s_andn2_b32 s6, 0x7f, s20
-; GFX6-NEXT: s_or_b64 s[2:3], s[18:19], s[10:11]
+; GFX6-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
+; GFX6-NEXT: s_lshr_b32 s4, s5, 31
+; GFX6-NEXT: s_or_b32 s6, s4, s6
+; GFX6-NEXT: s_andn2_b32 s4, 0x7f, s20
+; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
; GFX6-NEXT: s_not_b32 s16, s20
-; GFX6-NEXT: s_sub_i32 s18, s6, 64
-; GFX6-NEXT: s_sub_i32 s10, 64, s6
-; GFX6-NEXT: s_cmp_lt_u32 s6, 64
+; GFX6-NEXT: s_sub_i32 s18, s4, 64
+; GFX6-NEXT: s_sub_i32 s10, 64, s4
+; GFX6-NEXT: s_cmp_lt_u32 s4, 64
; GFX6-NEXT: s_cselect_b32 s19, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s6, 0
+; GFX6-NEXT: s_cmp_eq_u32 s4, 0
; GFX6-NEXT: s_cselect_b32 s21, 1, 0
-; GFX6-NEXT: s_lshl_b64 s[6:7], s[8:9], s16
+; GFX6-NEXT: s_lshl_b64 s[4:5], s[8:9], s16
; GFX6-NEXT: s_lshr_b64 s[10:11], s[8:9], s10
-; GFX6-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
+; GFX6-NEXT: s_lshl_b64 s[16:17], s[6:7], s16
; GFX6-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX6-NEXT: s_lshl_b64 s[8:9], s[8:9], s18
; GFX6-NEXT: s_cmp_lg_u32 s19, 0
-; GFX6-NEXT: s_cselect_b64 s[6:7], s[6:7], 0
+; GFX6-NEXT: s_cselect_b64 s[4:5], s[4:5], 0
; GFX6-NEXT: s_cselect_b64 s[8:9], s[10:11], s[8:9]
; GFX6-NEXT: s_cmp_lg_u32 s21, 0
-; GFX6-NEXT: s_cselect_b64 s[8:9], s[4:5], s[8:9]
-; GFX6-NEXT: s_and_b32 s4, s20, 0x7f
-; GFX6-NEXT: s_sub_i32 s18, s4, 64
-; GFX6-NEXT: s_sub_i32 s16, 64, s4
-; GFX6-NEXT: s_cmp_lt_u32 s4, 64
+; GFX6-NEXT: s_cselect_b64 s[6:7], s[6:7], s[8:9]
+; GFX6-NEXT: s_and_b32 s8, s20, 0x7f
+; GFX6-NEXT: s_sub_i32 s18, s8, 64
+; GFX6-NEXT: s_sub_i32 s16, 64, s8
+; GFX6-NEXT: s_cmp_lt_u32 s8, 64
; GFX6-NEXT: s_cselect_b32 s19, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s4, 0
+; GFX6-NEXT: s_cmp_eq_u32 s8, 0
; GFX6-NEXT: s_cselect_b32 s21, 1, 0
; GFX6-NEXT: s_lshr_b64 s[10:11], s[12:13], s20
; GFX6-NEXT: s_lshl_b64 s[16:17], s[14:15], s16
-; GFX6-NEXT: s_lshr_b64 s[4:5], s[14:15], s20
+; GFX6-NEXT: s_lshr_b64 s[8:9], s[14:15], s20
; GFX6-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX6-NEXT: s_lshr_b64 s[14:15], s[14:15], s18
; GFX6-NEXT: s_cmp_lg_u32 s19, 0
@@ -7115,88 +7093,87 @@ define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inr
; GFX6-NEXT: s_cmp_lg_u32 s21, 0
; GFX6-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX6-NEXT: s_cmp_lg_u32 s19, 0
-; GFX6-NEXT: s_cselect_b64 s[12:13], s[4:5], 0
-; GFX6-NEXT: s_or_b64 s[4:5], s[6:7], s[10:11]
-; GFX6-NEXT: s_or_b64 s[6:7], s[8:9], s[12:13]
+; GFX6-NEXT: s_cselect_b64 s[8:9], s[8:9], 0
+; GFX6-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GFX6-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_v2i128:
; GFX8: ; %bb.0:
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX8-NEXT: s_lshr_b32 s22, s1, 31
-; GFX8-NEXT: s_mov_b32 s23, 0
; GFX8-NEXT: s_lshl_b64 s[18:19], s[0:1], 1
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[22:23]
-; GFX8-NEXT: s_andn2_b32 s2, 0x7f, s16
+; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
+; GFX8-NEXT: s_lshr_b32 s0, s1, 31
+; GFX8-NEXT: s_or_b32 s2, s0, s2
+; GFX8-NEXT: s_andn2_b32 s0, 0x7f, s16
; GFX8-NEXT: s_not_b32 s17, s16
-; GFX8-NEXT: s_sub_i32 s21, s2, 64
-; GFX8-NEXT: s_sub_i32 s22, 64, s2
-; GFX8-NEXT: s_cmp_lt_u32 s2, 64
-; GFX8-NEXT: s_cselect_b32 s28, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s2, 0
-; GFX8-NEXT: s_cselect_b32 s29, 1, 0
-; GFX8-NEXT: s_lshr_b64 s[24:25], s[18:19], s22
-; GFX8-NEXT: s_lshl_b64 s[26:27], s[0:1], s17
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[18:19], s17
-; GFX8-NEXT: s_or_b64 s[24:25], s[24:25], s[26:27]
-; GFX8-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
-; GFX8-NEXT: s_cmp_lg_u32 s28, 0
-; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX8-NEXT: s_cselect_b64 s[18:19], s[24:25], s[18:19]
-; GFX8-NEXT: s_cmp_lg_u32 s29, 0
-; GFX8-NEXT: s_cselect_b64 s[18:19], s[0:1], s[18:19]
-; GFX8-NEXT: s_and_b32 s0, s16, 0x7f
; GFX8-NEXT: s_sub_i32 s21, s0, 64
; GFX8-NEXT: s_sub_i32 s22, 64, s0
; GFX8-NEXT: s_cmp_lt_u32 s0, 64
; GFX8-NEXT: s_cselect_b32 s26, 1, 0
; GFX8-NEXT: s_cmp_eq_u32 s0, 0
; GFX8-NEXT: s_cselect_b32 s27, 1, 0
-; GFX8-NEXT: s_lshr_b64 s[0:1], s[10:11], s16
+; GFX8-NEXT: s_lshr_b64 s[22:23], s[18:19], s22
+; GFX8-NEXT: s_lshl_b64 s[24:25], s[2:3], s17
+; GFX8-NEXT: s_lshl_b64 s[0:1], s[18:19], s17
+; GFX8-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
+; GFX8-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
+; GFX8-NEXT: s_cmp_lg_u32 s26, 0
+; GFX8-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
+; GFX8-NEXT: s_cselect_b64 s[18:19], s[22:23], s[18:19]
+; GFX8-NEXT: s_cmp_lg_u32 s27, 0
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], s[18:19]
+; GFX8-NEXT: s_and_b32 s17, s16, 0x7f
+; GFX8-NEXT: s_sub_i32 s21, s17, 64
+; GFX8-NEXT: s_sub_i32 s22, 64, s17
+; GFX8-NEXT: s_cmp_lt_u32 s17, 64
+; GFX8-NEXT: s_cselect_b32 s24, 1, 0
+; GFX8-NEXT: s_cmp_eq_u32 s17, 0
+; GFX8-NEXT: s_cselect_b32 s25, 1, 0
+; GFX8-NEXT: s_lshr_b64 s[18:19], s[10:11], s16
; GFX8-NEXT: s_lshr_b64 s[16:17], s[8:9], s16
-; GFX8-NEXT: s_lshl_b64 s[24:25], s[10:11], s22
-; GFX8-NEXT: s_or_b64 s[16:17], s[16:17], s[24:25]
+; GFX8-NEXT: s_lshl_b64 s[22:23], s[10:11], s22
+; GFX8-NEXT: s_or_b64 s[16:17], s[16:17], s[22:23]
; GFX8-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
-; GFX8-NEXT: s_cmp_lg_u32 s26, 0
+; GFX8-NEXT: s_cmp_lg_u32 s24, 0
; GFX8-NEXT: s_cselect_b64 s[10:11], s[16:17], s[10:11]
-; GFX8-NEXT: s_cmp_lg_u32 s27, 0
+; GFX8-NEXT: s_cmp_lg_u32 s25, 0
; GFX8-NEXT: s_cselect_b64 s[8:9], s[8:9], s[10:11]
-; GFX8-NEXT: s_cmp_lg_u32 s26, 0
-; GFX8-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
-; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
-; GFX8-NEXT: s_lshr_b32 s22, s5, 31
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
+; GFX8-NEXT: s_cmp_lg_u32 s24, 0
+; GFX8-NEXT: s_cselect_b64 s[10:11], s[18:19], 0
+; GFX8-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9]
; GFX8-NEXT: s_lshl_b64 s[8:9], s[4:5], 1
-; GFX8-NEXT: s_or_b64 s[4:5], s[6:7], s[22:23]
-; GFX8-NEXT: s_andn2_b32 s6, 0x7f, s20
-; GFX8-NEXT: s_or_b64 s[2:3], s[18:19], s[10:11]
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
+; GFX8-NEXT: s_lshr_b32 s4, s5, 31
+; GFX8-NEXT: s_or_b32 s6, s4, s6
+; GFX8-NEXT: s_andn2_b32 s4, 0x7f, s20
+; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
; GFX8-NEXT: s_not_b32 s16, s20
-; GFX8-NEXT: s_sub_i32 s18, s6, 64
-; GFX8-NEXT: s_sub_i32 s10, 64, s6
-; GFX8-NEXT: s_cmp_lt_u32 s6, 64
+; GFX8-NEXT: s_sub_i32 s18, s4, 64
+; GFX8-NEXT: s_sub_i32 s10, 64, s4
+; GFX8-NEXT: s_cmp_lt_u32 s4, 64
; GFX8-NEXT: s_cselect_b32 s19, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s6, 0
+; GFX8-NEXT: s_cmp_eq_u32 s4, 0
; GFX8-NEXT: s_cselect_b32 s21, 1, 0
-; GFX8-NEXT: s_lshl_b64 s[6:7], s[8:9], s16
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[8:9], s16
; GFX8-NEXT: s_lshr_b64 s[10:11], s[8:9], s10
-; GFX8-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
+; GFX8-NEXT: s_lshl_b64 s[16:17], s[6:7], s16
; GFX8-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX8-NEXT: s_lshl_b64 s[8:9], s[8:9], s18
; GFX8-NEXT: s_cmp_lg_u32 s19, 0
-; GFX8-NEXT: s_cselect_b64 s[6:7], s[6:7], 0
+; GFX8-NEXT: s_cselect_b64 s[4:5], s[4:5], 0
; GFX8-NEXT: s_cselect_b64 s[8:9], s[10:11], s[8:9]
; GFX8-NEXT: s_cmp_lg_u32 s21, 0
-; GFX8-NEXT: s_cselect_b64 s[8:9], s[4:5], s[8:9]
-; GFX8-NEXT: s_and_b32 s4, s20, 0x7f
-; GFX8-NEXT: s_sub_i32 s18, s4, 64
-; GFX8-NEXT: s_sub_i32 s16, 64, s4
-; GFX8-NEXT: s_cmp_lt_u32 s4, 64
+; GFX8-NEXT: s_cselect_b64 s[6:7], s[6:7], s[8:9]
+; GFX8-NEXT: s_and_b32 s8, s20, 0x7f
+; GFX8-NEXT: s_sub_i32 s18, s8, 64
+; GFX8-NEXT: s_sub_i32 s16, 64, s8
+; GFX8-NEXT: s_cmp_lt_u32 s8, 64
; GFX8-NEXT: s_cselect_b32 s19, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s4, 0
+; GFX8-NEXT: s_cmp_eq_u32 s8, 0
; GFX8-NEXT: s_cselect_b32 s21, 1, 0
; GFX8-NEXT: s_lshr_b64 s[10:11], s[12:13], s20
; GFX8-NEXT: s_lshl_b64 s[16:17], s[14:15], s16
-; GFX8-NEXT: s_lshr_b64 s[4:5], s[14:15], s20
+; GFX8-NEXT: s_lshr_b64 s[8:9], s[14:15], s20
; GFX8-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX8-NEXT: s_lshr_b64 s[14:15], s[14:15], s18
; GFX8-NEXT: s_cmp_lg_u32 s19, 0
@@ -7204,88 +7181,87 @@ define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inr
; GFX8-NEXT: s_cmp_lg_u32 s21, 0
; GFX8-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX8-NEXT: s_cmp_lg_u32 s19, 0
-; GFX8-NEXT: s_cselect_b64 s[12:13], s[4:5], 0
-; GFX8-NEXT: s_or_b64 s[4:5], s[6:7], s[10:11]
-; GFX8-NEXT: s_or_b64 s[6:7], s[8:9], s[12:13]
+; GFX8-NEXT: s_cselect_b64 s[8:9], s[8:9], 0
+; GFX8-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GFX8-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_v2i128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX9-NEXT: s_lshr_b32 s22, s1, 31
-; GFX9-NEXT: s_mov_b32 s23, 0
; GFX9-NEXT: s_lshl_b64 s[18:19], s[0:1], 1
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[22:23]
-; GFX9-NEXT: s_andn2_b32 s2, 0x7f, s16
+; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
+; GFX9-NEXT: s_lshr_b32 s0, s1, 31
+; GFX9-NEXT: s_or_b32 s2, s0, s2
+; GFX9-NEXT: s_andn2_b32 s0, 0x7f, s16
; GFX9-NEXT: s_not_b32 s17, s16
-; GFX9-NEXT: s_sub_i32 s21, s2, 64
-; GFX9-NEXT: s_sub_i32 s22, 64, s2
-; GFX9-NEXT: s_cmp_lt_u32 s2, 64
-; GFX9-NEXT: s_cselect_b32 s28, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s2, 0
-; GFX9-NEXT: s_cselect_b32 s29, 1, 0
-; GFX9-NEXT: s_lshr_b64 s[24:25], s[18:19], s22
-; GFX9-NEXT: s_lshl_b64 s[26:27], s[0:1], s17
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[18:19], s17
-; GFX9-NEXT: s_or_b64 s[24:25], s[24:25], s[26:27]
-; GFX9-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
-; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX9-NEXT: s_cselect_b64 s[18:19], s[24:25], s[18:19]
-; GFX9-NEXT: s_cmp_lg_u32 s29, 0
-; GFX9-NEXT: s_cselect_b64 s[18:19], s[0:1], s[18:19]
-; GFX9-NEXT: s_and_b32 s0, s16, 0x7f
; GFX9-NEXT: s_sub_i32 s21, s0, 64
; GFX9-NEXT: s_sub_i32 s22, 64, s0
; GFX9-NEXT: s_cmp_lt_u32 s0, 64
; GFX9-NEXT: s_cselect_b32 s26, 1, 0
; GFX9-NEXT: s_cmp_eq_u32 s0, 0
; GFX9-NEXT: s_cselect_b32 s27, 1, 0
-; GFX9-NEXT: s_lshr_b64 s[0:1], s[10:11], s16
+; GFX9-NEXT: s_lshr_b64 s[22:23], s[18:19], s22
+; GFX9-NEXT: s_lshl_b64 s[24:25], s[2:3], s17
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[18:19], s17
+; GFX9-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
+; GFX9-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
+; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
+; GFX9-NEXT: s_cselect_b64 s[18:19], s[22:23], s[18:19]
+; GFX9-NEXT: s_cmp_lg_u32 s27, 0
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], s[18:19]
+; GFX9-NEXT: s_and_b32 s17, s16, 0x7f
+; GFX9-NEXT: s_sub_i32 s21, s17, 64
+; GFX9-NEXT: s_sub_i32 s22, 64, s17
+; GFX9-NEXT: s_cmp_lt_u32 s17, 64
+; GFX9-NEXT: s_cselect_b32 s24, 1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s17, 0
+; GFX9-NEXT: s_cselect_b32 s25, 1, 0
+; GFX9-NEXT: s_lshr_b64 s[18:19], s[10:11], s16
; GFX9-NEXT: s_lshr_b64 s[16:17], s[8:9], s16
-; GFX9-NEXT: s_lshl_b64 s[24:25], s[10:11], s22
-; GFX9-NEXT: s_or_b64 s[16:17], s[16:17], s[24:25]
+; GFX9-NEXT: s_lshl_b64 s[22:23], s[10:11], s22
+; GFX9-NEXT: s_or_b64 s[16:17], s[16:17], s[22:23]
; GFX9-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
-; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_cmp_lg_u32 s24, 0
; GFX9-NEXT: s_cselect_b64 s[10:11], s[16:17], s[10:11]
-; GFX9-NEXT: s_cmp_lg_u32 s27, 0
+; GFX9-NEXT: s_cmp_lg_u32 s25, 0
; GFX9-NEXT: s_cselect_b64 s[8:9], s[8:9], s[10:11]
-; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
-; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
-; GFX9-NEXT: s_lshr_b32 s22, s5, 31
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
+; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_cselect_b64 s[10:11], s[18:19], 0
+; GFX9-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9]
; GFX9-NEXT: s_lshl_b64 s[8:9], s[4:5], 1
-; GFX9-NEXT: s_or_b64 s[4:5], s[6:7], s[22:23]
-; GFX9-NEXT: s_andn2_b32 s6, 0x7f, s20
-; GFX9-NEXT: s_or_b64 s[2:3], s[18:19], s[10:11]
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
+; GFX9-NEXT: s_lshr_b32 s4, s5, 31
+; GFX9-NEXT: s_or_b32 s6, s4, s6
+; GFX9-NEXT: s_andn2_b32 s4, 0x7f, s20
+; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
; GFX9-NEXT: s_not_b32 s16, s20
-; GFX9-NEXT: s_sub_i32 s18, s6, 64
-; GFX9-NEXT: s_sub_i32 s10, 64, s6
-; GFX9-NEXT: s_cmp_lt_u32 s6, 64
+; GFX9-NEXT: s_sub_i32 s18, s4, 64
+; GFX9-NEXT: s_sub_i32 s10, 64, s4
+; GFX9-NEXT: s_cmp_lt_u32 s4, 64
; GFX9-NEXT: s_cselect_b32 s19, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s6, 0
+; GFX9-NEXT: s_cmp_eq_u32 s4, 0
; GFX9-NEXT: s_cselect_b32 s21, 1, 0
-; GFX9-NEXT: s_lshl_b64 s[6:7], s[8:9], s16
+; GFX9-NEXT: s_lshl_b64 s[4:5], s[8:9], s16
; GFX9-NEXT: s_lshr_b64 s[10:11], s[8:9], s10
-; GFX9-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
+; GFX9-NEXT: s_lshl_b64 s[16:17], s[6:7], s16
; GFX9-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX9-NEXT: s_lshl_b64 s[8:9], s[8:9], s18
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cselect_b64 s[6:7], s[6:7], 0
+; GFX9-NEXT: s_cselect_b64 s[4:5], s[4:5], 0
; GFX9-NEXT: s_cselect_b64 s[8:9], s[10:11], s[8:9]
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cselect_b64 s[8:9], s[4:5], s[8:9]
-; GFX9-NEXT: s_and_b32 s4, s20, 0x7f
-; GFX9-NEXT: s_sub_i32 s18, s4, 64
-; GFX9-NEXT: s_sub_i32 s16, 64, s4
-; GFX9-NEXT: s_cmp_lt_u32 s4, 64
+; GFX9-NEXT: s_cselect_b64 s[6:7], s[6:7], s[8:9]
+; GFX9-NEXT: s_and_b32 s8, s20, 0x7f
+; GFX9-NEXT: s_sub_i32 s18, s8, 64
+; GFX9-NEXT: s_sub_i32 s16, 64, s8
+; GFX9-NEXT: s_cmp_lt_u32 s8, 64
; GFX9-NEXT: s_cselect_b32 s19, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s4, 0
+; GFX9-NEXT: s_cmp_eq_u32 s8, 0
; GFX9-NEXT: s_cselect_b32 s21, 1, 0
; GFX9-NEXT: s_lshr_b64 s[10:11], s[12:13], s20
; GFX9-NEXT: s_lshl_b64 s[16:17], s[14:15], s16
-; GFX9-NEXT: s_lshr_b64 s[4:5], s[14:15], s20
+; GFX9-NEXT: s_lshr_b64 s[8:9], s[14:15], s20
; GFX9-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX9-NEXT: s_lshr_b64 s[14:15], s[14:15], s18
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
@@ -7293,61 +7269,60 @@ define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inr
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
; GFX9-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cselect_b64 s[12:13], s[4:5], 0
-; GFX9-NEXT: s_or_b64 s[4:5], s[6:7], s[10:11]
-; GFX9-NEXT: s_or_b64 s[6:7], s[8:9], s[12:13]
+; GFX9-NEXT: s_cselect_b64 s[8:9], s[8:9], 0
+; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GFX9-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_v2i128:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX10-NEXT: s_lshr_b32 s18, s1, 31
-; GFX10-NEXT: s_mov_b32 s19, 0
-; GFX10-NEXT: s_andn2_b32 s17, 0x7f, s16
+; GFX10-NEXT: s_lshr_b32 s17, s1, 31
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[18:19]
-; GFX10-NEXT: s_not_b32 s18, s16
-; GFX10-NEXT: s_sub_i32 s21, s17, 64
-; GFX10-NEXT: s_sub_i32 s22, 64, s17
+; GFX10-NEXT: s_or_b32 s2, s17, s2
+; GFX10-NEXT: s_andn2_b32 s17, 0x7f, s16
+; GFX10-NEXT: s_not_b32 s21, s16
+; GFX10-NEXT: s_sub_i32 s26, s17, 64
+; GFX10-NEXT: s_sub_i32 s18, 64, s17
; GFX10-NEXT: s_cmp_lt_u32 s17, 64
-; GFX10-NEXT: s_cselect_b32 s28, 1, 0
+; GFX10-NEXT: s_cselect_b32 s27, 1, 0
; GFX10-NEXT: s_cmp_eq_u32 s17, 0
; GFX10-NEXT: s_cselect_b32 s17, 1, 0
-; GFX10-NEXT: s_lshr_b64 s[22:23], s[0:1], s22
-; GFX10-NEXT: s_lshl_b64 s[24:25], s[2:3], s18
-; GFX10-NEXT: s_lshl_b64 s[26:27], s[0:1], s18
-; GFX10-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
-; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], s21
-; GFX10-NEXT: s_cmp_lg_u32 s28, 0
-; GFX10-NEXT: s_cselect_b64 s[24:25], s[26:27], 0
-; GFX10-NEXT: s_cselect_b64 s[0:1], s[22:23], s[0:1]
+; GFX10-NEXT: s_lshr_b64 s[18:19], s[0:1], s18
+; GFX10-NEXT: s_lshl_b64 s[22:23], s[2:3], s21
+; GFX10-NEXT: s_lshl_b64 s[24:25], s[0:1], s21
+; GFX10-NEXT: s_or_b64 s[18:19], s[18:19], s[22:23]
+; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], s26
+; GFX10-NEXT: s_cmp_lg_u32 s27, 0
+; GFX10-NEXT: s_cselect_b64 s[22:23], s[24:25], 0
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[18:19], s[0:1]
; GFX10-NEXT: s_cmp_lg_u32 s17, 0
; GFX10-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX10-NEXT: s_and_b32 s0, s16, 0x7f
-; GFX10-NEXT: s_sub_i32 s18, s0, 64
+; GFX10-NEXT: s_sub_i32 s21, s0, 64
; GFX10-NEXT: s_sub_i32 s17, 64, s0
; GFX10-NEXT: s_cmp_lt_u32 s0, 64
-; GFX10-NEXT: s_cselect_b32 s21, 1, 0
+; GFX10-NEXT: s_cselect_b32 s24, 1, 0
; GFX10-NEXT: s_cmp_eq_u32 s0, 0
-; GFX10-NEXT: s_cselect_b32 s26, 1, 0
+; GFX10-NEXT: s_cselect_b32 s25, 1, 0
; GFX10-NEXT: s_lshr_b64 s[0:1], s[8:9], s16
-; GFX10-NEXT: s_lshl_b64 s[22:23], s[10:11], s17
+; GFX10-NEXT: s_lshl_b64 s[18:19], s[10:11], s17
; GFX10-NEXT: s_lshr_b64 s[16:17], s[10:11], s16
-; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[22:23]
-; GFX10-NEXT: s_lshr_b64 s[10:11], s[10:11], s18
-; GFX10-NEXT: s_cmp_lg_u32 s21, 0
+; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[18:19]
+; GFX10-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
+; GFX10-NEXT: s_cmp_lg_u32 s24, 0
; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[10:11]
-; GFX10-NEXT: s_cmp_lg_u32 s26, 0
+; GFX10-NEXT: s_cmp_lg_u32 s25, 0
; GFX10-NEXT: s_cselect_b64 s[0:1], s[8:9], s[0:1]
-; GFX10-NEXT: s_cmp_lg_u32 s21, 0
+; GFX10-NEXT: s_cmp_lg_u32 s24, 0
; GFX10-NEXT: s_cselect_b64 s[8:9], s[16:17], 0
; GFX10-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
-; GFX10-NEXT: s_lshr_b32 s18, s5, 31
+; GFX10-NEXT: s_lshr_b32 s8, s5, 31
+; GFX10-NEXT: s_or_b64 s[0:1], s[22:23], s[0:1]
+; GFX10-NEXT: s_or_b32 s6, s8, s6
; GFX10-NEXT: s_andn2_b32 s8, 0x7f, s20
-; GFX10-NEXT: s_or_b64 s[0:1], s[24:25], s[0:1]
; GFX10-NEXT: s_lshl_b64 s[4:5], s[4:5], 1
-; GFX10-NEXT: s_or_b64 s[6:7], s[6:7], s[18:19]
; GFX10-NEXT: s_not_b32 s16, s20
; GFX10-NEXT: s_sub_i32 s18, s8, 64
; GFX10-NEXT: s_sub_i32 s9, 64, s8
@@ -7390,54 +7365,53 @@ define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inr
; GFX11-LABEL: s_fshr_v2i128:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX11-NEXT: s_lshr_b32 s18, s1, 31
-; GFX11-NEXT: s_mov_b32 s19, 0
-; GFX11-NEXT: s_and_not1_b32 s17, 0x7f, s16
+; GFX11-NEXT: s_lshr_b32 s17, s1, 31
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[18:19]
-; GFX11-NEXT: s_not_b32 s18, s16
-; GFX11-NEXT: s_sub_i32 s21, s17, 64
-; GFX11-NEXT: s_sub_i32 s22, 64, s17
+; GFX11-NEXT: s_or_b32 s2, s17, s2
+; GFX11-NEXT: s_and_not1_b32 s17, 0x7f, s16
+; GFX11-NEXT: s_not_b32 s21, s16
+; GFX11-NEXT: s_sub_i32 s26, s17, 64
+; GFX11-NEXT: s_sub_i32 s18, 64, s17
; GFX11-NEXT: s_cmp_lt_u32 s17, 64
-; GFX11-NEXT: s_cselect_b32 s28, 1, 0
+; GFX11-NEXT: s_cselect_b32 s27, 1, 0
; GFX11-NEXT: s_cmp_eq_u32 s17, 0
; GFX11-NEXT: s_cselect_b32 s17, 1, 0
-; GFX11-NEXT: s_lshr_b64 s[22:23], s[0:1], s22
-; GFX11-NEXT: s_lshl_b64 s[24:25], s[2:3], s18
-; GFX11-NEXT: s_lshl_b64 s[26:27], s[0:1], s18
-; GFX11-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
-; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], s21
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_cselect_b64 s[24:25], s[26:27], 0
-; GFX11-NEXT: s_cselect_b64 s[0:1], s[22:23], s[0:1]
+; GFX11-NEXT: s_lshr_b64 s[18:19], s[0:1], s18
+; GFX11-NEXT: s_lshl_b64 s[22:23], s[2:3], s21
+; GFX11-NEXT: s_lshl_b64 s[24:25], s[0:1], s21
+; GFX11-NEXT: s_or_b64 s[18:19], s[18:19], s[22:23]
+; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], s26
+; GFX11-NEXT: s_cmp_lg_u32 s27, 0
+; GFX11-NEXT: s_cselect_b64 s[22:23], s[24:25], 0
+; GFX11-NEXT: s_cselect_b64 s[0:1], s[18:19], s[0:1]
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX11-NEXT: s_and_b32 s0, s16, 0x7f
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_sub_i32 s18, s0, 64
+; GFX11-NEXT: s_sub_i32 s21, s0, 64
; GFX11-NEXT: s_sub_i32 s17, 64, s0
; GFX11-NEXT: s_cmp_lt_u32 s0, 64
-; GFX11-NEXT: s_cselect_b32 s21, 1, 0
+; GFX11-NEXT: s_cselect_b32 s24, 1, 0
; GFX11-NEXT: s_cmp_eq_u32 s0, 0
-; GFX11-NEXT: s_cselect_b32 s26, 1, 0
+; GFX11-NEXT: s_cselect_b32 s25, 1, 0
; GFX11-NEXT: s_lshr_b64 s[0:1], s[8:9], s16
-; GFX11-NEXT: s_lshl_b64 s[22:23], s[10:11], s17
+; GFX11-NEXT: s_lshl_b64 s[18:19], s[10:11], s17
; GFX11-NEXT: s_lshr_b64 s[16:17], s[10:11], s16
-; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[22:23]
-; GFX11-NEXT: s_lshr_b64 s[10:11], s[10:11], s18
-; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[18:19]
+; GFX11-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
+; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_cselect_b64 s[0:1], s[0:1], s[10:11]
-; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_cmp_lg_u32 s25, 0
; GFX11-NEXT: s_cselect_b64 s[0:1], s[8:9], s[0:1]
-; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_cselect_b64 s[8:9], s[16:17], 0
; GFX11-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
-; GFX11-NEXT: s_lshr_b32 s18, s5, 31
+; GFX11-NEXT: s_lshr_b32 s8, s5, 31
+; GFX11-NEXT: s_or_b64 s[0:1], s[22:23], s[0:1]
+; GFX11-NEXT: s_or_b32 s6, s8, s6
; GFX11-NEXT: s_and_not1_b32 s8, 0x7f, s20
-; GFX11-NEXT: s_or_b64 s[0:1], s[24:25], s[0:1]
; GFX11-NEXT: s_lshl_b64 s[4:5], s[4:5], 1
-; GFX11-NEXT: s_or_b64 s[6:7], s[6:7], s[18:19]
; GFX11-NEXT: s_not_b32 s16, s20
; GFX11-NEXT: s_sub_i32 s18, s8, 64
; GFX11-NEXT: s_sub_i32 s9, 64, s8
@@ -7488,7 +7462,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
; GFX6-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GFX6-NEXT: v_lshl_b64 v[17:18], v[0:1], 1
; GFX6-NEXT: v_lshrrev_b32_e32 v0, 31, v1
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
; GFX6-NEXT: v_not_b32_e32 v0, v16
; GFX6-NEXT: v_and_b32_e32 v19, 0x7f, v0
; GFX6-NEXT: v_sub_i32_e32 v0, vcc, 64, v19
@@ -7530,7 +7504,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
; GFX6-NEXT: v_or_b32_e32 v1, v17, v9
; GFX6-NEXT: v_lshl_b64 v[8:9], v[4:5], 1
; GFX6-NEXT: v_lshrrev_b32_e32 v4, 31, v5
-; GFX6-NEXT: v_or_b32_e32 v6, v6, v4
+; GFX6-NEXT: v_or_b32_e32 v6, v4, v6
; GFX6-NEXT: v_not_b32_e32 v4, v20
; GFX6-NEXT: v_and_b32_e32 v16, 0x7f, v4
; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 64, v16
@@ -7580,7 +7554,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
; GFX8-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; GFX8-NEXT: v_lshlrev_b64 v[17:18], 1, v[0:1]
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 31, v1
-; GFX8-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
; GFX8-NEXT: v_not_b32_e32 v0, v16
; GFX8-NEXT: v_and_b32_e32 v19, 0x7f, v0
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, 64, v19
@@ -7622,7 +7596,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
; GFX8-NEXT: v_or_b32_e32 v1, v17, v9
; GFX8-NEXT: v_lshlrev_b64 v[8:9], 1, v[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 31, v5
-; GFX8-NEXT: v_or_b32_e32 v6, v6, v4
+; GFX8-NEXT: v_or_b32_e32 v6, v4, v6
; GFX8-NEXT: v_not_b32_e32 v4, v20
; GFX8-NEXT: v_and_b32_e32 v16, 0x7f, v4
; GFX8-NEXT: v_sub_u32_e32 v4, vcc, 64, v16
@@ -7672,7 +7646,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; GFX9-NEXT: v_lshlrev_b64 v[17:18], 1, v[0:1]
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 31, v1
-; GFX9-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
; GFX9-NEXT: v_not_b32_e32 v0, v16
; GFX9-NEXT: v_and_b32_e32 v19, 0x7f, v0
; GFX9-NEXT: v_sub_u32_e32 v0, 64, v19
@@ -7713,7 +7687,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
; GFX9-NEXT: v_or_b32_e32 v1, v17, v9
; GFX9-NEXT: v_lshlrev_b64 v[8:9], 1, v[4:5]
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 31, v5
-; GFX9-NEXT: v_or_b32_e32 v6, v6, v4
+; GFX9-NEXT: v_or_b32_e32 v6, v4, v6
; GFX9-NEXT: v_not_b32_e32 v4, v20
; GFX9-NEXT: v_and_b32_e32 v16, 0x7f, v4
; GFX9-NEXT: v_sub_u32_e32 v4, 64, v16
@@ -7769,7 +7743,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX10-NEXT: v_add_nc_u32_e32 v27, 0xffffffc0, v26
; GFX10-NEXT: v_sub_nc_u32_e32 v18, 64, v25
-; GFX10-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX10-NEXT: v_or_b32_e32 v2, v17, v2
; GFX10-NEXT: v_add_nc_u32_e32 v19, 0xffffffc0, v25
; GFX10-NEXT: v_lshlrev_b64 v[23:24], v25, v[0:1]
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v25
@@ -7801,7 +7775,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v17, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v8, s4
; GFX10-NEXT: v_and_b32_e32 v25, 0x7f, v16
-; GFX10-NEXT: v_or_b32_e32 v6, v6, v10
+; GFX10-NEXT: v_or_b32_e32 v6, v10, v6
; GFX10-NEXT: v_and_b32_e32 v20, 0x7f, v20
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v9, s4
; GFX10-NEXT: v_cndmask_b32_e32 v26, 0, v2, vcc_lo
@@ -7860,7 +7834,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX11-NEXT: v_sub_nc_u32_e32 v18, 64, v25
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v17
+; GFX11-NEXT: v_or_b32_e32 v2, v17, v2
; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v25
; GFX11-NEXT: v_lshlrev_b64 v[23:24], v25, v[0:1]
; GFX11-NEXT: v_and_b32_e32 v26, 0x7f, v16
@@ -7893,7 +7867,7 @@ define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %a
; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v16, vcc_lo
; GFX11-NEXT: v_not_b32_e32 v16, v20
; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 0, v26
-; GFX11-NEXT: v_or_b32_e32 v6, v6, v10
+; GFX11-NEXT: v_or_b32_e32 v6, v10, v6
; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v17 :: v_dual_and_b32 v20, 0x7f, v20
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_dual_cndmask_b32 v26, 0, v2 :: v_dual_and_b32 v25, 0x7f, v16
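
The s_or_b64-to-s_or_b32 rewrites in the funnel-shift tests above all stem from one pattern: a 64-bit OR whose second operand is zero-extended from 32 bits, so the high half of the result is simply the high half of the other operand. A minimal LLVM IR sketch of that pattern (illustrative only; the function name and shape are not from the patch):

define i64 @or_low_half(i64 %a, i32 %b) {
  %b64 = zext i32 %b to i64
  ; High 32 bits of %b64 are zero, so only the low halves need an OR:
  ; low32(%r) = low32(%a) | %b, and high32(%r) = high32(%a) unchanged.
  %r = or i64 %a, %b64
  ret i64 %r
}

The swapped operand order in the surviving v_or_b32_e32 lines (e.g. "v2, v2, v0" becoming "v2, v0, v2") appears to fall out of the combine rebuilding the OR with the known-zero-high operand first.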
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
index 4031fe0be2823..f57fc005b994b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
@@ -194,10 +194,8 @@ declare i32 @llvm.amdgcn.readfirstlane(i32)
define amdgpu_ps i64 @s_sdiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-LABEL: s_sdiv_i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; CHECK-NEXT: s_mov_b32 s7, -1
-; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
+; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: v_cmp_ne_u64_e64 vcc, s[0:1], 0
; CHECK-NEXT: s_mov_b32 s0, 1
; CHECK-NEXT: s_cbranch_vccz .LBB1_2
@@ -218,7 +216,6 @@ define amdgpu_ps i64 @s_sdiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1
; CHECK-NEXT: v_rcp_iflag_f32_e32 v0, v0
; CHECK-NEXT: s_subb_u32 s5, 0, s11
-; CHECK-NEXT: s_xor_b64 s[6:7], s[6:7], s[8:9]
; CHECK-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
; CHECK-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
; CHECK-NEXT: v_trunc_f32_e32 v2, v1
@@ -327,9 +324,10 @@ define amdgpu_ps i64 @s_sdiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; CHECK-NEXT: v_xor_b32_e32 v0, s6, v0
+; CHECK-NEXT: s_xor_b64 s[0:1], s[6:7], s[8:9]
+; CHECK-NEXT: v_xor_b32_e32 v0, s0, v0
+; CHECK-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: v_subrev_i32_e32 v0, vcc, s6, v0
; CHECK-NEXT: s_branch .LBB1_3
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
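
The s_sdiv_i64 diff above (and the matching srem/udiv/urem diffs below) drop an s_and_b64 with the constant pair {0, -1}, i.e. an AND with 0xFFFFFFFF00000000: masking away the low 32 bits before comparing against zero is the same as testing only the high half. A small LLVM IR sketch of that shape (hypothetical function, not from the patch):

define i1 @hi_half_nonzero(i64 %x) {
  ; -4294967296 is 0xFFFFFFFF00000000: the mask keeps only the high half.
  %hi = and i64 %x, -4294967296
  ; Equivalent to checking high32(%x) != 0, so no 64-bit AND is needed.
  %c = icmp ne i64 %hi, 0
  ret i1 %c
}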
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll
index a9b3deb3e49f4..118e63409f810 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll
@@ -1319,7 +1319,7 @@ define i65 @v_sext_inreg_i65_22(i65 %value) {
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_lshl_b64 v[2:3], v[2:3], 22
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 10, v1
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 1
; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v2
; GFX6-NEXT: v_bfe_u32 v1, v1, 0, 10
@@ -1333,7 +1333,7 @@ define i65 @v_sext_inreg_i65_22(i65 %value) {
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b64 v[2:3], 22, v[2:3]
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 10, v1
-; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
; GFX8-NEXT: v_bfe_i32 v2, v2, 0, 1
; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v2
; GFX8-NEXT: v_bfe_u32 v1, v1, 0, 10
@@ -1347,7 +1347,7 @@ define i65 @v_sext_inreg_i65_22(i65 %value) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 22, v[2:3]
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 10, v1
-; GFX9-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX9-NEXT: v_or_b32_e32 v2, v3, v2
; GFX9-NEXT: v_bfe_i32 v2, v2, 0, 1
; GFX9-NEXT: v_ashrrev_i32_e32 v3, 31, v2
; GFX9-NEXT: v_bfe_u32 v1, v1, 0, 10
@@ -1361,7 +1361,7 @@ define i65 @v_sext_inreg_i65_22(i65 %value) {
; GFX10PLUS-NEXT: v_lshlrev_b64 v[2:3], 22, v[2:3]
; GFX10PLUS-NEXT: v_lshrrev_b32_e32 v3, 10, v1
; GFX10PLUS-NEXT: v_bfe_u32 v1, v1, 0, 10
-; GFX10PLUS-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX10PLUS-NEXT: v_or_b32_e32 v2, v3, v2
; GFX10PLUS-NEXT: v_bfe_i32 v2, v2, 0, 1
; GFX10PLUS-NEXT: v_ashrrev_i32_e32 v3, 31, v2
; GFX10PLUS-NEXT: v_lshl_or_b32 v1, v2, 10, v1
@@ -1429,29 +1429,27 @@ define amdgpu_ps i65 @s_sext_inreg_i65_18(i65 inreg %value) {
; GCN-LABEL: s_sext_inreg_i65_18:
; GCN: ; %bb.0:
; GCN-NEXT: s_lshl_b64 s[2:3], s[2:3], 18
-; GCN-NEXT: s_lshr_b32 s4, s1, 14
-; GCN-NEXT: s_mov_b32 s5, 0
-; GCN-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GCN-NEXT: s_lshr_b32 s3, s1, 14
+; GCN-NEXT: s_or_b32 s2, s3, s2
; GCN-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
; GCN-NEXT: s_bfe_u64 s[0:1], s[0:1], 0x2e0000
-; GCN-NEXT: s_lshl_b32 s7, s2, 14
-; GCN-NEXT: s_mov_b32 s6, s5
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT: s_lshl_b32 s5, s2, 14
+; GCN-NEXT: s_mov_b32 s4, 0
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GCN-NEXT: s_ashr_i64 s[2:3], s[2:3], 18
; GCN-NEXT: ; return to shader part epilog
;
; GFX10PLUS-LABEL: s_sext_inreg_i65_18:
; GFX10PLUS: ; %bb.0:
; GFX10PLUS-NEXT: s_lshl_b64 s[2:3], s[2:3], 18
-; GFX10PLUS-NEXT: s_lshr_b32 s4, s1, 14
-; GFX10PLUS-NEXT: s_mov_b32 s5, 0
+; GFX10PLUS-NEXT: s_lshr_b32 s3, s1, 14
; GFX10PLUS-NEXT: s_bfe_u64 s[0:1], s[0:1], 0x2e0000
-; GFX10PLUS-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
-; GFX10PLUS-NEXT: s_mov_b32 s6, s5
+; GFX10PLUS-NEXT: s_or_b32 s2, s3, s2
+; GFX10PLUS-NEXT: s_mov_b32 s4, 0
; GFX10PLUS-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
-; GFX10PLUS-NEXT: s_lshl_b32 s7, s2, 14
+; GFX10PLUS-NEXT: s_lshl_b32 s5, s2, 14
; GFX10PLUS-NEXT: s_ashr_i64 s[2:3], s[2:3], 18
-; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
+; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX10PLUS-NEXT: ; return to shader part epilog
%shl = shl i65 %value, 18
%ashr = ashr i65 %shl, 18
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
index 1a10f5fb7a5ce..8d8eca162257a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
@@ -188,12 +188,10 @@ declare i32 @llvm.amdgcn.readfirstlane(i32)
define amdgpu_ps i64 @s_srem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-LABEL: s_srem_i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; CHECK-NEXT: s_mov_b32 s7, -1
-; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
+; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: v_cmp_ne_u64_e64 vcc, s[0:1], 0
-; CHECK-NEXT: s_mov_b32 s7, 1
+; CHECK-NEXT: s_mov_b32 s0, 1
; CHECK-NEXT: s_cbranch_vccz .LBB1_2
; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: s_ashr_i32 s6, s3, 31
@@ -212,7 +210,6 @@ define amdgpu_ps i64 @s_srem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1
; CHECK-NEXT: v_rcp_iflag_f32_e32 v0, v0
; CHECK-NEXT: s_subb_u32 s5, 0, s9
-; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
; CHECK-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
; CHECK-NEXT: v_trunc_f32_e32 v2, v1
@@ -273,43 +270,43 @@ define amdgpu_ps i64 @s_srem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc
; CHECK-NEXT: v_mul_lo_u32 v2, s11, v0
; CHECK-NEXT: v_mul_lo_u32 v3, s10, v1
-; CHECK-NEXT: v_mul_hi_u32 v5, s10, v0
+; CHECK-NEXT: v_mul_hi_u32 v4, s10, v0
; CHECK-NEXT: v_mul_hi_u32 v0, s11, v0
-; CHECK-NEXT: v_mul_hi_u32 v6, s11, v1
+; CHECK-NEXT: v_mul_hi_u32 v5, s11, v1
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v5
+; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v4
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT: v_mul_lo_u32 v5, s11, v1
+; CHECK-NEXT: v_mul_lo_u32 v4, s11, v1
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
; CHECK-NEXT: v_mul_hi_u32 v3, s10, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v5, v0
-; CHECK-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; CHECK-NEXT: v_add_i32_e32 v0, vcc, v4, v0
+; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v3
; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, v5, v3
-; CHECK-NEXT: v_add_i32_e32 v5, vcc, v0, v2
-; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s8, v5, 0
+; CHECK-NEXT: v_add_i32_e32 v3, vcc, v4, v3
+; CHECK-NEXT: v_add_i32_e32 v4, vcc, v0, v2
+; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s8, v4, 0
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v6, v2
+; CHECK-NEXT: v_add_i32_e32 v2, vcc, v5, v2
; CHECK-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s8, v2, v[1:2]
-; CHECK-NEXT: v_mov_b32_e32 v3, s11
+; CHECK-NEXT: v_mov_b32_e32 v5, s11
; CHECK-NEXT: v_sub_i32_e32 v0, vcc, s10, v0
-; CHECK-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s9, v5, v[1:2]
-; CHECK-NEXT: v_mov_b32_e32 v4, s9
-; CHECK-NEXT: v_subb_u32_e64 v2, s[0:1], v3, v1, vcc
+; CHECK-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s9, v4, v[1:2]
+; CHECK-NEXT: v_mov_b32_e32 v3, s9
+; CHECK-NEXT: v_subb_u32_e64 v2, s[0:1], v5, v1, vcc
; CHECK-NEXT: v_sub_i32_e64 v1, s[0:1], s11, v1
+; CHECK-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
; CHECK-NEXT: v_cmp_le_u32_e64 s[0:1], s9, v2
-; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1]
+; CHECK-NEXT: v_subrev_i32_e32 v3, vcc, s8, v0
+; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1]
; CHECK-NEXT: v_cmp_le_u32_e64 s[0:1], s8, v0
+; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
; CHECK-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[0:1]
; CHECK-NEXT: v_cmp_eq_u32_e64 s[0:1], s9, v2
-; CHECK-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
-; CHECK-NEXT: v_cndmask_b32_e64 v2, v3, v5, s[0:1]
-; CHECK-NEXT: v_subrev_i32_e32 v3, vcc, s8, v0
-; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
; CHECK-NEXT: v_cmp_le_u32_e32 vcc, s9, v1
+; CHECK-NEXT: v_cndmask_b32_e64 v2, v4, v5, s[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
; CHECK-NEXT: v_cmp_le_u32_e32 vcc, s8, v3
; CHECK-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc
@@ -322,11 +319,12 @@ define amdgpu_ps i64 @s_srem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; CHECK-NEXT: v_xor_b32_e32 v0, s6, v0
; CHECK-NEXT: v_subrev_i32_e32 v0, vcc, s6, v0
+; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: s_branch .LBB1_3
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: .LBB1_3: ; %Flow
-; CHECK-NEXT: s_xor_b32 s0, s7, 1
+; CHECK-NEXT: s_xor_b32 s0, s0, 1
; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB1_5
; CHECK-NEXT: ; %bb.4:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
index 1a6d26142208f..4de10788a6bd7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
@@ -187,11 +187,9 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-LABEL: s_udiv_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_or_b64 s[4:5], s[0:1], s[2:3]
-; CHECK-NEXT: s_mov_b32 s6, 0
-; CHECK-NEXT: s_mov_b32 s7, -1
-; CHECK-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: v_cmp_ne_u64_e64 vcc, s[4:5], 0
-; CHECK-NEXT: s_mov_b32 s6, 1
+; CHECK-NEXT: s_mov_b32 s4, 1
; CHECK-NEXT: v_cvt_f32_u32_e32 v2, s2
; CHECK-NEXT: s_cbranch_vccz .LBB1_2
; CHECK-NEXT: ; %bb.1:
@@ -199,7 +197,6 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_cvt_f32_u32_e32 v1, s3
; CHECK-NEXT: s_sub_u32 s4, 0, s2
; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: v_madmk_f32 v1, v1, 0x4f800000, v2
; CHECK-NEXT: s_subb_u32 s5, 0, s3
; CHECK-NEXT: v_rcp_iflag_f32_e32 v1, v1
@@ -318,11 +315,12 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_cndmask_b32_e32 v0, v9, v5, vcc
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_branch .LBB1_3
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: .LBB1_3: ; %Flow
-; CHECK-NEXT: s_xor_b32 s1, s6, 1
+; CHECK-NEXT: s_xor_b32 s1, s4, 1
; CHECK-NEXT: s_cmp_lg_u32 s1, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB1_5
; CHECK-NEXT: ; %bb.4:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
index 2a1bf4bf068f0..a41ec8e7ce3ea 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
@@ -184,18 +184,15 @@ define amdgpu_ps i64 @s_urem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-LABEL: s_urem_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_or_b64 s[4:5], s[0:1], s[2:3]
-; CHECK-NEXT: s_mov_b32 s6, 0
-; CHECK-NEXT: s_mov_b32 s7, -1
-; CHECK-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: v_cmp_ne_u64_e64 vcc, s[4:5], 0
-; CHECK-NEXT: s_mov_b32 s6, 1
+; CHECK-NEXT: s_mov_b32 s4, 1
; CHECK-NEXT: v_cvt_f32_u32_e32 v2, s2
; CHECK-NEXT: s_cbranch_vccz .LBB1_2
; CHECK-NEXT: ; %bb.1:
; CHECK-NEXT: v_mov_b32_e32 v0, s3
; CHECK-NEXT: v_cvt_f32_u32_e32 v1, s3
; CHECK-NEXT: s_sub_u32 s4, 0, s2
-; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: v_madmk_f32 v1, v1, 0x4f800000, v2
; CHECK-NEXT: s_subb_u32 s5, 0, s3
@@ -314,11 +311,12 @@ define amdgpu_ps i64 @s_urem_i64(i64 inreg %num, i64 inreg %den) {
; CHECK-NEXT: v_cndmask_b32_e32 v0, v3, v6, vcc
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_branch .LBB1_3
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: .LBB1_3: ; %Flow
-; CHECK-NEXT: s_xor_b32 s1, s6, 1
+; CHECK-NEXT: s_xor_b32 s1, s4, 1
; CHECK-NEXT: s_cmp_lg_u32 s1, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB1_5
; CHECK-NEXT: ; %bb.4:
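
The div_i128.ll changes that follow show the same fold inside the shift loops of the 128-bit division expansion. A sketch (not taken from the patch) of the form the combiner could produce for a 64-bit OR whose second operand occupies only the low half:

%lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %x(s64)
%orlo:_(s32) = G_OR %lo, %y32
%res:_(s64) = G_MERGE_VALUES %orlo(s32), %hi(s32)

Here %x, %y32, %orlo and %res are placeholder names; the actual output depends on how the combine itself is implemented.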
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index e6c38d29be949..7c193f7484288 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -1371,10 +1371,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[10:11]
; GFX9-G-NEXT: v_lshrrev_b32_e32 v10, 31, v13
; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 1, v[14:15]
-; GFX9-G-NEXT: v_or_b32_e32 v2, v2, v10
+; GFX9-G-NEXT: v_or_b32_e32 v2, v10, v2
; GFX9-G-NEXT: v_lshrrev_b32_e32 v14, 31, v11
; GFX9-G-NEXT: v_sub_co_u32_e32 v10, vcc, v24, v2
-; GFX9-G-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX9-G-NEXT: v_or_b32_e32 v0, v14, v0
; GFX9-G-NEXT: v_subb_co_u32_e32 v10, vcc, v25, v3, vcc
; GFX9-G-NEXT: v_subb_co_u32_e32 v10, vcc, v26, v0, vcc
; GFX9-G-NEXT: v_subb_co_u32_e32 v10, vcc, v27, v1, vcc
@@ -1395,7 +1395,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-NEXT: v_or_b32_e32 v0, v20, v22
; GFX9-G-NEXT: v_or_b32_e32 v1, v21, v23
; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GFX9-G-NEXT: v_or_b32_e32 v12, v12, v8
+; GFX9-G-NEXT: v_or_b32_e32 v12, v8, v12
; GFX9-G-NEXT: v_and_b32_e32 v8, 1, v28
; GFX9-G-NEXT: v_mov_b32_e32 v0, v8
; GFX9-G-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
@@ -1409,7 +1409,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[6:7]
; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], 1, v[12:13]
; GFX9-G-NEXT: v_lshrrev_b32_e32 v4, 31, v7
-; GFX9-G-NEXT: v_or_b32_e32 v12, v12, v4
+; GFX9-G-NEXT: v_or_b32_e32 v12, v4, v12
; GFX9-G-NEXT: v_or_b32_e32 v6, v0, v2
; GFX9-G-NEXT: v_or_b32_e32 v7, v1, v3
; GFX9-G-NEXT: .LBB0_6: ; %Flow3
@@ -1757,8 +1757,8 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v5, v2, v3
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v4, v2, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v0
; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v1
; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v11
; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
@@ -1865,8 +1865,8 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
-; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v1, v0, v1
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v4
; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr4_vgpr5 killed $exec
; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v0, v1
; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr22_vgpr23 killed $exec
@@ -1876,8 +1876,8 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
-; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v1, v0, v1
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2
; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v3
; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v0, v1
; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v22
@@ -1892,8 +1892,8 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
-; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v15, v2, v3
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v0
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v14, v2, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v0
; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v1
; GFX9-G-O0-NEXT: s_waitcnt vmcnt(10)
; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v27
@@ -3501,9 +3501,9 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-NEXT: v_lshlrev_b64 v[10:11], 1, v[16:17]
; GFX9-G-NEXT: v_lshrrev_b32_e32 v12, 31, v3
; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
-; GFX9-G-NEXT: v_or_b32_e32 v10, v10, v12
+; GFX9-G-NEXT: v_or_b32_e32 v10, v12, v10
; GFX9-G-NEXT: v_lshrrev_b32_e32 v12, 31, v9
-; GFX9-G-NEXT: v_or_b32_e32 v2, v2, v12
+; GFX9-G-NEXT: v_or_b32_e32 v2, v12, v2
; GFX9-G-NEXT: v_sub_co_u32_e32 v12, vcc, v22, v2
; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v23, v3, vcc
; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v24, v10, vcc
@@ -3525,7 +3525,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-NEXT: v_or_b32_e32 v11, v19, v21
; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v0
+; GFX9-G-NEXT: v_or_b32_e32 v8, v0, v8
; GFX9-G-NEXT: v_and_b32_e32 v0, 1, v12
; GFX9-G-NEXT: v_mov_b32_e32 v11, v1
; GFX9-G-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
@@ -3539,7 +3539,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 1, v[14:15]
; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 31, v15
-; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v2
+; GFX9-G-NEXT: v_or_b32_e32 v8, v2, v8
; GFX9-G-NEXT: v_or_b32_e32 v10, v10, v0
; GFX9-G-NEXT: v_or_b32_e32 v11, v11, v1
; GFX9-G-NEXT: .LBB1_6: ; %Flow3
@@ -3844,8 +3844,8 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v5, v2, v3
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v4, v2, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v0
; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v1
; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v11
; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
@@ -3952,8 +3952,8 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
-; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v1, v0, v1
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v4
; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr4_vgpr5 killed $exec
; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v0, v1
; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr12_vgpr13 killed $exec
@@ -3963,8 +3963,8 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
-; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v1, v0, v1
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2
; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v3
; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v0, v1
; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
@@ -3979,8 +3979,8 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
-; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v13, v2, v3
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v0
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v12, v2, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v0
; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v1
; GFX9-G-O0-NEXT: s_waitcnt vmcnt(10)
; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v28
diff --git a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
index 7ea98a16e3b84..3599b3dea3eb9 100644
--- a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
@@ -573,9 +573,9 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v19
; GISEL-NEXT: v_lshl_b64 v[0:1], v[18:19], 1
; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
-; GISEL-NEXT: v_or_b32_e32 v2, v2, v22
+; GISEL-NEXT: v_or_b32_e32 v2, v22, v2
; GISEL-NEXT: v_lshrrev_b32_e32 v18, 31, v9
-; GISEL-NEXT: v_or_b32_e32 v0, v0, v18
+; GISEL-NEXT: v_or_b32_e32 v0, v18, v0
; GISEL-NEXT: v_sub_i32_e32 v18, vcc, v32, v0
; GISEL-NEXT: v_subb_u32_e32 v18, vcc, v33, v1, vcc
; GISEL-NEXT: v_subb_u32_e32 v18, vcc, v34, v2, vcc
@@ -599,7 +599,7 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_and_b32_e32 v22, 1, v22
; GISEL-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
; GISEL-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GISEL-NEXT: v_or_b32_e32 v8, v8, v36
+; GISEL-NEXT: v_or_b32_e32 v8, v36, v8
; GISEL-NEXT: v_mov_b32_e32 v0, v22
; GISEL-NEXT: v_mov_b32_e32 v1, v23
; GISEL-NEXT: s_andn2_b64 exec, exec, s[8:9]
@@ -611,7 +611,7 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[2:3], v[16:17], 1
; GISEL-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v10, 31, v17
-; GISEL-NEXT: v_or_b32_e32 v8, v8, v10
+; GISEL-NEXT: v_or_b32_e32 v8, v10, v8
; GISEL-NEXT: v_or_b32_e32 v22, v0, v2
; GISEL-NEXT: v_or_b32_e32 v23, v1, v3
; GISEL-NEXT: .LBB0_6: ; %Flow16
@@ -766,9 +766,9 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v15
; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v26
; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc
-; GISEL-NEXT: v_or_b32_e32 v16, v16, v6
-; GISEL-NEXT: v_or_b32_e32 v2, v2, v34
-; GISEL-NEXT: v_or_b32_e32 v10, v10, v14
+; GISEL-NEXT: v_or_b32_e32 v16, v6, v16
+; GISEL-NEXT: v_or_b32_e32 v2, v34, v2
+; GISEL-NEXT: v_or_b32_e32 v10, v14, v10
; GISEL-NEXT: v_or_b32_e32 v14, v0, v12
; GISEL-NEXT: v_or_b32_e32 v15, v1, v13
; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc
@@ -802,7 +802,7 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1
; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v4, 31, v15
-; GISEL-NEXT: v_or_b32_e32 v10, v10, v4
+; GISEL-NEXT: v_or_b32_e32 v10, v4, v10
; GISEL-NEXT: v_or_b32_e32 v14, v0, v2
; GISEL-NEXT: v_or_b32_e32 v15, v1, v3
; GISEL-NEXT: .LBB0_12: ; %Flow12
@@ -1333,8 +1333,8 @@ define <2 x i128> @v_udiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GISEL-NEXT: v_or_b32_e32 v22, v18, v20
; GISEL-NEXT: v_or_b32_e32 v23, v19, v21
-; GISEL-NEXT: v_or_b32_e32 v16, v16, v0
-; GISEL-NEXT: v_or_b32_e32 v20, v24, v35
+; GISEL-NEXT: v_or_b32_e32 v16, v0, v16
+; GISEL-NEXT: v_or_b32_e32 v20, v35, v24
; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc
; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v30, v20
@@ -1355,7 +1355,7 @@ define <2 x i128> @v_udiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_subb_u32_e32 v25, vcc, v25, v19, vcc
; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v21, vcc
; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v35, vcc
-; GISEL-NEXT: v_or_b32_e32 v2, v2, v34
+; GISEL-NEXT: v_or_b32_e32 v2, v34, v2
; GISEL-NEXT: v_mov_b32_e32 v19, v1
; GISEL-NEXT: v_mov_b32_e32 v18, v0
; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
@@ -1367,7 +1367,7 @@ define <2 x i128> @v_udiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[0:1], v[22:23], 1
; GISEL-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v8, 31, v23
-; GISEL-NEXT: v_or_b32_e32 v2, v2, v8
+; GISEL-NEXT: v_or_b32_e32 v2, v8, v2
; GISEL-NEXT: v_or_b32_e32 v18, v18, v0
; GISEL-NEXT: v_or_b32_e32 v19, v19, v1
; GISEL-NEXT: .LBB1_6: ; %Flow16
@@ -1504,9 +1504,9 @@ define <2 x i128> @v_udiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshrrev_b32_e32 v9, 31, v10
; GISEL-NEXT: v_add_i32_e32 v8, vcc, -1, v8
; GISEL-NEXT: v_addc_u32_e32 v11, vcc, -1, v11, vcc
-; GISEL-NEXT: v_or_b32_e32 v16, v16, v4
-; GISEL-NEXT: v_or_b32_e32 v22, v22, v30
-; GISEL-NEXT: v_or_b32_e32 v0, v0, v9
+; GISEL-NEXT: v_or_b32_e32 v16, v4, v16
+; GISEL-NEXT: v_or_b32_e32 v22, v30, v22
+; GISEL-NEXT: v_or_b32_e32 v0, v9, v0
; GISEL-NEXT: v_or_b32_e32 v9, v20, v6
; GISEL-NEXT: v_or_b32_e32 v10, v21, v7
; GISEL-NEXT: v_addc_u32_e32 v24, vcc, -1, v24, vcc
@@ -1540,7 +1540,7 @@ define <2 x i128> @v_udiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[4:5], v[9:10], 1
; GISEL-NEXT: v_lshl_b64 v[8:9], v[0:1], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v0, 31, v10
-; GISEL-NEXT: v_or_b32_e32 v8, v8, v0
+; GISEL-NEXT: v_or_b32_e32 v8, v0, v8
; GISEL-NEXT: v_or_b32_e32 v10, v20, v4
; GISEL-NEXT: v_or_b32_e32 v11, v21, v5
; GISEL-NEXT: .LBB1_12: ; %Flow12
@@ -2175,8 +2175,8 @@ define <2 x i128> @v_srem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
; GISEL-NEXT: v_or_b32_e32 v20, v0, v2
; GISEL-NEXT: v_or_b32_e32 v21, v1, v3
-; GISEL-NEXT: v_or_b32_e32 v2, v24, v22
-; GISEL-NEXT: v_or_b32_e32 v3, v26, v48
+; GISEL-NEXT: v_or_b32_e32 v2, v22, v24
+; GISEL-NEXT: v_or_b32_e32 v3, v48, v26
; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
; GISEL-NEXT: v_addc_u32_e32 v34, vcc, -1, v34, vcc
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v35, v3
@@ -2197,7 +2197,7 @@ define <2 x i128> @v_srem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_subb_u32_e32 v27, vcc, v27, v24, vcc
; GISEL-NEXT: v_subb_u32_e32 v24, vcc, v2, v48, vcc
; GISEL-NEXT: v_subb_u32_e32 v25, vcc, v25, v49, vcc
-; GISEL-NEXT: v_or_b32_e32 v18, v18, v39
+; GISEL-NEXT: v_or_b32_e32 v18, v39, v18
; GISEL-NEXT: v_mov_b32_e32 v0, v22
; GISEL-NEXT: v_mov_b32_e32 v1, v23
; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
@@ -2209,7 +2209,7 @@ define <2 x i128> @v_srem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[2:3], v[20:21], 1
; GISEL-NEXT: v_lshl_b64 v[18:19], v[18:19], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v20, 31, v21
-; GISEL-NEXT: v_or_b32_e32 v18, v18, v20
+; GISEL-NEXT: v_or_b32_e32 v18, v20, v18
; GISEL-NEXT: v_or_b32_e32 v31, v0, v2
; GISEL-NEXT: v_or_b32_e32 v32, v1, v3
; GISEL-NEXT: .LBB2_6: ; %Flow16
@@ -2366,9 +2366,9 @@ define <2 x i128> @v_srem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_addc_u32_e32 v37, vcc, -1, v37, vcc
; GISEL-NEXT: v_or_b32_e32 v20, v0, v2
; GISEL-NEXT: v_or_b32_e32 v21, v1, v3
-; GISEL-NEXT: v_or_b32_e32 v2, v26, v24
-; GISEL-NEXT: v_or_b32_e32 v3, v52, v25
-; GISEL-NEXT: v_or_b32_e32 v14, v14, v22
+; GISEL-NEXT: v_or_b32_e32 v2, v24, v26
+; GISEL-NEXT: v_or_b32_e32 v3, v25, v52
+; GISEL-NEXT: v_or_b32_e32 v14, v22, v14
; GISEL-NEXT: v_addc_u32_e32 v38, vcc, -1, v38, vcc
; GISEL-NEXT: v_addc_u32_e32 v39, vcc, -1, v39, vcc
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v48, v3
@@ -2400,7 +2400,7 @@ define <2 x i128> @v_srem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[22:23], v[20:21], 1
; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v21
-; GISEL-NEXT: v_or_b32_e32 v2, v2, v14
+; GISEL-NEXT: v_or_b32_e32 v2, v14, v2
; GISEL-NEXT: v_or_b32_e32 v20, v0, v22
; GISEL-NEXT: v_or_b32_e32 v21, v1, v23
; GISEL-NEXT: .LBB2_12: ; %Flow12
@@ -3002,8 +3002,8 @@ define <2 x i128> @v_urem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
; GISEL-NEXT: v_or_b32_e32 v22, v16, v18
; GISEL-NEXT: v_or_b32_e32 v23, v17, v19
-; GISEL-NEXT: v_or_b32_e32 v18, v26, v24
-; GISEL-NEXT: v_or_b32_e32 v19, v28, v39
+; GISEL-NEXT: v_or_b32_e32 v18, v24, v26
+; GISEL-NEXT: v_or_b32_e32 v19, v39, v28
; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v32, vcc
; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v33, vcc
; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v34, v19
@@ -3024,7 +3024,7 @@ define <2 x i128> @v_urem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_subb_u32_e32 v29, vcc, v29, v26, vcc
; GISEL-NEXT: v_subb_u32_e32 v26, vcc, v18, v39, vcc
; GISEL-NEXT: v_subb_u32_e32 v27, vcc, v27, v48, vcc
-; GISEL-NEXT: v_or_b32_e32 v20, v20, v38
+; GISEL-NEXT: v_or_b32_e32 v20, v38, v20
; GISEL-NEXT: v_mov_b32_e32 v16, v24
; GISEL-NEXT: v_mov_b32_e32 v17, v25
; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
@@ -3036,7 +3036,7 @@ define <2 x i128> @v_urem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[18:19], v[22:23], 1
; GISEL-NEXT: v_lshl_b64 v[20:21], v[20:21], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v23
-; GISEL-NEXT: v_or_b32_e32 v20, v20, v22
+; GISEL-NEXT: v_or_b32_e32 v20, v22, v20
; GISEL-NEXT: v_or_b32_e32 v32, v16, v18
; GISEL-NEXT: v_or_b32_e32 v33, v17, v19
; GISEL-NEXT: .LBB3_6: ; %Flow16
@@ -3175,9 +3175,9 @@ define <2 x i128> @v_urem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_addc_u32_e32 v35, vcc, -1, v35, vcc
; GISEL-NEXT: v_or_b32_e32 v24, v16, v18
; GISEL-NEXT: v_or_b32_e32 v25, v17, v19
-; GISEL-NEXT: v_or_b32_e32 v18, v30, v28
-; GISEL-NEXT: v_or_b32_e32 v19, v50, v29
-; GISEL-NEXT: v_or_b32_e32 v22, v22, v26
+; GISEL-NEXT: v_or_b32_e32 v18, v28, v30
+; GISEL-NEXT: v_or_b32_e32 v19, v29, v50
+; GISEL-NEXT: v_or_b32_e32 v22, v26, v22
; GISEL-NEXT: v_addc_u32_e32 v36, vcc, -1, v36, vcc
; GISEL-NEXT: v_addc_u32_e32 v37, vcc, -1, v37, vcc
; GISEL-NEXT: v_sub_i32_e32 v16, vcc, v38, v19
@@ -3209,7 +3209,7 @@ define <2 x i128> @v_urem_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_lshl_b64 v[26:27], v[24:25], 1
; GISEL-NEXT: v_lshl_b64 v[18:19], v[22:23], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v22, 31, v25
-; GISEL-NEXT: v_or_b32_e32 v18, v18, v22
+; GISEL-NEXT: v_or_b32_e32 v18, v22, v18
; GISEL-NEXT: v_or_b32_e32 v24, v16, v26
; GISEL-NEXT: v_or_b32_e32 v25, v17, v27
; GISEL-NEXT: .LBB3_12: ; %Flow12
diff --git a/llvm/test/CodeGen/AMDGPU/itofp.i128.ll b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
index c316ec71863d0..55914e57ab68d 100644
--- a/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
@@ -239,7 +239,7 @@ define float @sitofp_i128_to_f32(i128 %x) {
; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
-; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v2, v0
; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
@@ -484,7 +484,7 @@ define float @uitofp_i128_to_f32(i128 %x) {
; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
-; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v2, v0
; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
@@ -771,7 +771,7 @@ define double @sitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: v_lshlrev_b64 v[4:5], 1, v[4:5]
; GISEL-NEXT: v_lshlrev_b64 v[0:1], 1, v[2:3]
; GISEL-NEXT: v_lshrrev_b32_e32 v2, 31, v3
-; GISEL-NEXT: v_or_b32_e32 v2, v4, v2
+; GISEL-NEXT: v_or_b32_e32 v2, v2, v4
; GISEL-NEXT: v_mov_b32_e32 v5, v3
; GISEL-NEXT: v_mov_b32_e32 v4, v2
; GISEL-NEXT: v_mov_b32_e32 v3, v1
@@ -779,7 +779,7 @@ define double @sitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: .LBB2_10: ; %itofp-sw-epilog
; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
; GISEL-NEXT: v_bfe_u32 v0, v2, 2, 1
-; GISEL-NEXT: v_or_b32_e32 v0, v2, v0
+; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
; GISEL-NEXT: v_add_co_u32_e32 v2, vcc, 1, v0
; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GISEL-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v4, vcc
@@ -1048,7 +1048,7 @@ define double @uitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: v_lshlrev_b64 v[8:9], 1, v[0:1]
; GISEL-NEXT: v_lshlrev_b64 v[10:11], 1, v[2:3]
; GISEL-NEXT: v_lshrrev_b32_e32 v0, 31, v1
-; GISEL-NEXT: v_or_b32_e32 v10, v10, v0
+; GISEL-NEXT: v_or_b32_e32 v10, v0, v10
; GISEL-NEXT: v_mov_b32_e32 v0, v8
; GISEL-NEXT: v_mov_b32_e32 v1, v9
; GISEL-NEXT: v_mov_b32_e32 v2, v10
@@ -1056,7 +1056,7 @@ define double @uitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: .LBB3_10: ; %itofp-sw-epilog
; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
; GISEL-NEXT: v_bfe_u32 v4, v0, 2, 1
-; GISEL-NEXT: v_or_b32_e32 v0, v0, v4
+; GISEL-NEXT: v_or_b32_e32 v0, v4, v0
; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GISEL-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
@@ -1330,7 +1330,7 @@ define half @sitofp_i128_to_f16(i128 %x) {
; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
-; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v2, v0
; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
@@ -1577,7 +1577,7 @@ define half @uitofp_i128_to_f16(i128 %x) {
; GISEL-NEXT: ; %bb.10: ; %itofp-sw-epilog
; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
; GISEL-NEXT: v_bfe_u32 v2, v0, 2, 1
-; GISEL-NEXT: v_or_b32_e32 v0, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v0, v2, v0
; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GISEL-NEXT: v_and_b32_e32 v2, 0x4000000, v0
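
For readers skimming the updated checks: per the PR title, the new combines target 64-bit G_OR/G_AND that can only touch one 32-bit half of a merged value (e.g. an or with a G_ZEXT, or an and with the constant i64 -4294967296 == 0xFFFFFFFF00000000), so the work can be done with 32-bit ops on the affected half; hence the commuted v_or_b32 operands and shuffled scalar registers in the diffs above. A minimal standalone sketch of the bitwise identities involved (illustrative C++, not code from this patch):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t lo = 0x12345678, hi = 0x9abcdef0, x = 0x0f0f0f0f;
  // G_MERGE_VALUES(lo, hi): lo occupies bits [31:0], hi occupies bits [63:32].
  uint64_t merged = ((uint64_t)hi << 32) | lo;

  // G_OR with G_ZEXT(x): the zero-extended operand has an all-zero high half,
  // so only the low 32 bits can change -> merge(lo | x, hi).
  assert((merged | (uint64_t)x) == (((uint64_t)hi << 32) | (lo | x)));

  // G_AND with 0xFFFFFFFF00000000 (i64 -4294967296): the mask clears the low
  // half and passes the high half through unchanged -> merge(0, hi).
  assert((merged & 0xFFFFFFFF00000000ULL) == ((uint64_t)hi << 32));
  return 0;
}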