[llvm] [CodeGen] Simplify expandRoundInexactToOdd (PR #134988)
Jay Foad via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 9 03:31:07 PDT 2025
https://github.com/jayfoad created https://github.com/llvm/llvm-project/pull/134988
FP_ROUND and FP_EXTEND the input value before FABSing it, instead of
FABSing it first. This avoids the bit twiddling that was previously
needed to copy the sign bit from the input to the result. It does
introduce one extra FABS, but that is folded into another instruction
for free on AMDGPU, which is the only target currently affected by this
change.
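
For reference, here is a minimal scalar C++ model of what the expansion
computes after this change, for the f64 -> f32 case (the DAG code itself
is type-generic). This is an illustrative sketch, not the LLVM code; the
function name and variable names are made up to mirror the SDValues in
expandRoundInexactToOdd.

#include <bit>      // std::bit_cast (C++20)
#include <cmath>
#include <cstdint>

// Round a double to float with round-to-odd, structured like the
// patched expansion: round first, take absolute values only for the
// direction test.
float roundToOddF32(double Wide) {
  float Narrow = static_cast<float>(Wide);  // FP_ROUND (may be inexact)
  double NarrowAsWide = Narrow;             // FP_EXTEND (always exact)
  uint32_t NarrowBits = std::bit_cast<uint32_t>(Narrow);
  bool AlreadyOdd = NarrowBits & 1;
  // SETUEQ: true if equal (rounding was exact) or unordered (NaN).
  bool KeepNarrow =
      (!(Wide < NarrowAsWide) && !(Wide > NarrowAsWide)) || AlreadyOdd;
  // The two FABSes this patch introduces: compare magnitudes to see
  // which way FP_ROUND went.
  bool RoundedDown = std::fabs(Wide) > std::fabs(NarrowAsWide);
  // IEEE formats are sign-magnitude, so incrementing the bit pattern
  // moves one ULP away from zero and decrementing moves toward zero,
  // for either sign. That is why no separate copy of the sign bit is
  // needed: the +/-1 adjustment is already sign-correct.
  uint32_t Adjusted = RoundedDown ? NarrowBits + 1 : NarrowBits - 1;
  return std::bit_cast<float>(KeepNarrow ? NarrowBits : Adjusted);
}

The old expansion instead FABSed the operand up front, rounded the
absolute value, and then shifted, truncated and ORed the saved sign bit
back into the integer result at the end.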
From c61123d156efc5f95a41ad200e6c19b5b0066178 Mon Sep 17 00:00:00 2001
From: Jay Foad <jay.foad at amd.com>
Date: Wed, 9 Apr 2025 11:21:28 +0100
Subject: [PATCH] [CodeGen] Simplify expandRoundInexactToOdd
FP_ROUND and FP_EXTEND the input value before FABSing it, instead of
FABSing it first. This avoids the bit twiddling that was previously
needed to copy the sign bit from the input to the result. It does
introduce one extra FABS, but that is folded into another instruction
for free on AMDGPU, which is the only target currently affected by this
change.
---
.../CodeGen/SelectionDAG/TargetLowering.cpp | 32 +-
llvm/test/CodeGen/AMDGPU/bf16-conversions.ll | 198 +++--
llvm/test/CodeGen/AMDGPU/bf16.ll | 98 ++-
.../AMDGPU/fp_trunc_store_fp64_to_bf16.ll | 724 +++++++++---------
4 files changed, 481 insertions(+), 571 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 0f38bbd46cbca..89f806d8b1c30 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -11608,28 +11608,13 @@ SDValue TargetLowering::expandRoundInexactToOdd(EVT ResultVT, SDValue Op,
// correct for this using a trick explained in: Boldo, Sylvie, and
// Guillaume Melquiond. "When double rounding is odd." 17th IMACS
// World Congress. 2005.
- unsigned BitSize = OperandVT.getScalarSizeInBits();
- EVT WideIntVT = OperandVT.changeTypeToInteger();
- SDValue OpAsInt = DAG.getBitcast(WideIntVT, Op);
- SDValue SignBit =
- DAG.getNode(ISD::AND, dl, WideIntVT, OpAsInt,
- DAG.getConstant(APInt::getSignMask(BitSize), dl, WideIntVT));
- SDValue AbsWide;
- if (isOperationLegalOrCustom(ISD::FABS, OperandVT)) {
- AbsWide = DAG.getNode(ISD::FABS, dl, OperandVT, Op);
- } else {
- SDValue ClearedSign = DAG.getNode(
- ISD::AND, dl, WideIntVT, OpAsInt,
- DAG.getConstant(APInt::getSignedMaxValue(BitSize), dl, WideIntVT));
- AbsWide = DAG.getBitcast(OperandVT, ClearedSign);
- }
- SDValue AbsNarrow = DAG.getFPExtendOrRound(AbsWide, dl, ResultVT);
- SDValue AbsNarrowAsWide = DAG.getFPExtendOrRound(AbsNarrow, dl, OperandVT);
+ SDValue Narrow = DAG.getFPExtendOrRound(Op, dl, ResultVT);
+ SDValue NarrowAsWide = DAG.getFPExtendOrRound(Narrow, dl, OperandVT);
// We can keep the narrow value as-is if narrowing was exact (no
// rounding error), the wide value was NaN (the narrow value is also
// NaN and should be preserved) or if we rounded to the odd value.
- SDValue NarrowBits = DAG.getNode(ISD::BITCAST, dl, ResultIntVT, AbsNarrow);
+ SDValue NarrowBits = DAG.getNode(ISD::BITCAST, dl, ResultIntVT, Narrow);
SDValue One = DAG.getConstant(1, dl, ResultIntVT);
SDValue NegativeOne = DAG.getAllOnesConstant(dl, ResultIntVT);
SDValue And = DAG.getNode(ISD::AND, dl, ResultIntVT, NarrowBits, One);
@@ -11640,13 +11625,15 @@ SDValue TargetLowering::expandRoundInexactToOdd(EVT ResultVT, SDValue Op,
SDValue AlreadyOdd = DAG.getSetCC(dl, ResultIntVTCCVT, And, Zero, ISD::SETNE);
EVT WideSetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
- AbsWide.getValueType());
+ Op.getValueType());
// We keep results which are exact, odd or NaN.
SDValue KeepNarrow =
- DAG.getSetCC(dl, WideSetCCVT, AbsWide, AbsNarrowAsWide, ISD::SETUEQ);
+ DAG.getSetCC(dl, WideSetCCVT, Op, NarrowAsWide, ISD::SETUEQ);
KeepNarrow = DAG.getNode(ISD::OR, dl, WideSetCCVT, KeepNarrow, AlreadyOdd);
// We morally performed a round-down if AbsNarrow is smaller than
// AbsWide.
+ SDValue AbsWide = DAG.getNode(ISD::FABS, dl, OperandVT, Op);
+ SDValue AbsNarrowAsWide = DAG.getNode(ISD::FABS, dl, OperandVT, NarrowAsWide);
SDValue NarrowIsRd =
DAG.getSetCC(dl, WideSetCCVT, AbsWide, AbsNarrowAsWide, ISD::SETOGT);
// If the narrow value is odd or exact, pick it.
@@ -11656,11 +11643,6 @@ SDValue TargetLowering::expandRoundInexactToOdd(EVT ResultVT, SDValue Op,
SDValue Adjust = DAG.getSelect(dl, ResultIntVT, NarrowIsRd, One, NegativeOne);
SDValue Adjusted = DAG.getNode(ISD::ADD, dl, ResultIntVT, NarrowBits, Adjust);
Op = DAG.getSelect(dl, ResultIntVT, KeepNarrow, NarrowBits, Adjusted);
- int ShiftAmount = BitSize - ResultVT.getScalarSizeInBits();
- SDValue ShiftCnst = DAG.getShiftAmountConstant(ShiftAmount, WideIntVT, dl);
- SignBit = DAG.getNode(ISD::SRL, dl, WideIntVT, SignBit, ShiftCnst);
- SignBit = DAG.getNode(ISD::TRUNCATE, dl, ResultIntVT, SignBit);
- Op = DAG.getNode(ISD::OR, dl, ResultIntVT, Op, SignBit);
return DAG.getNode(ISD::BITCAST, dl, ResultVT, Op);
}
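
For context on the test updates below, here is a hedged sketch of the
full f64 -> bf16 path the tests exercise: round to odd into f32 (the
model sketched earlier in this mail), then round-to-nearest-even into
bf16, which is the v_bfe_u32 / v_add3_u32 0x7fff / v_or_b32 0x400000
pattern in the checks. The helper names are made up for illustration.

#include <bit>
#include <cmath>
#include <cstdint>

float roundToOddF32(double); // from the sketch earlier in this mail

// f32 -> bf16 with round-to-nearest-even on the top 16 bits.
uint16_t f32ToBF16(float F) {
  uint32_t Bits = std::bit_cast<uint32_t>(F);
  // NaN survives the f64 -> f32 round, so checking here matches the
  // tests' v_cmp_u_f64 check on the source.
  if (std::isnan(F))
    return static_cast<uint16_t>((Bits | 0x400000) >> 16); // quiet it
  uint32_t Lsb = (Bits >> 16) & 1;                 // v_bfe_u32 ..., 16, 1
  return static_cast<uint16_t>((Bits + Lsb + 0x7FFF) >> 16); // v_add3_u32
}

// f64 -> bf16 by double rounding through f32. Rounding the intermediate
// to odd preserves the inexactness information, so the final
// round-to-nearest-even result matches a single correctly rounded
// f64 -> bf16 conversion (Boldo & Melquiond).
uint16_t f64ToBF16(double D) { return f32ToBF16(roundToOddF32(D)); }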
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
index 3be911ab9e7f4..a597faa028f22 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -111,75 +111,65 @@ define amdgpu_ps float @v_test_cvt_f32_bf16_v(float %src) {
define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
; GFX-942-LABEL: v_test_cvt_v2f64_v2bf16_v:
; GFX-942: ; %bb.0:
-; GFX-942-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX-942-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
; GFX-942-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
; GFX-942-NEXT: v_and_b32_e32 v7, 1, v6
-; GFX-942-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
-; GFX-942-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-942-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; GFX-942-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
+; GFX-942-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[4:5]
+; GFX-942-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v7
; GFX-942-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
; GFX-942-NEXT: v_add_u32_e32 v4, v6, v4
-; GFX-942-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GFX-942-NEXT: s_or_b64 vcc, vcc, s[0:1]
; GFX-942-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; GFX-942-NEXT: s_brev_b32 s4, 1
-; GFX-942-NEXT: v_and_or_b32 v5, v1, s4, v4
-; GFX-942-NEXT: v_bfe_u32 v4, v4, 16, 1
-; GFX-942-NEXT: s_movk_i32 s5, 0x7fff
-; GFX-942-NEXT: v_add3_u32 v4, v4, v5, s5
-; GFX-942-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX-942-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX-942-NEXT: s_movk_i32 s4, 0x7fff
+; GFX-942-NEXT: v_add3_u32 v5, v5, v4, s4
+; GFX-942-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX-942-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
; GFX-942-NEXT: s_nop 1
-; GFX-942-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX-942-NEXT: v_cvt_f32_f64_e64 v5, |v[2:3]|
+; GFX-942-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX-942-NEXT: v_cvt_f32_f64_e32 v5, v[2:3]
; GFX-942-NEXT: v_cvt_f64_f32_e32 v[0:1], v5
; GFX-942-NEXT: v_and_b32_e32 v6, 1, v5
-; GFX-942-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[2:3]|, v[0:1]
-; GFX-942-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[2:3]|, v[0:1]
-; GFX-942-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6
+; GFX-942-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[2:3]|, |v[0:1]|
+; GFX-942-NEXT: v_cmp_nlg_f64_e32 vcc, v[2:3], v[0:1]
+; GFX-942-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v6
; GFX-942-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[2:3]
; GFX-942-NEXT: v_add_u32_e32 v0, v5, v0
-; GFX-942-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GFX-942-NEXT: s_or_b64 vcc, vcc, s[0:1]
; GFX-942-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
-; GFX-942-NEXT: v_and_or_b32 v1, v3, s4, v0
-; GFX-942-NEXT: v_bfe_u32 v0, v0, 16, 1
-; GFX-942-NEXT: v_add3_u32 v0, v0, v1, s5
-; GFX-942-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GFX-942-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX-942-NEXT: v_add3_u32 v1, v1, v0, s4
+; GFX-942-NEXT: v_or_b32_e32 v0, 0x400000, v0
; GFX-942-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
; GFX-942-NEXT: s_mov_b32 s0, 0x7060302
; GFX-942-NEXT: s_nop 0
-; GFX-942-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX-942-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GFX-942-NEXT: v_perm_b32 v0, v0, v4, s0
; GFX-942-NEXT: ; return to shader part epilog
;
; GFX-950-LABEL: v_test_cvt_v2f64_v2bf16_v:
; GFX-950: ; %bb.0:
-; GFX-950-NEXT: v_mov_b32_e32 v4, v3
-; GFX-950-NEXT: v_and_b32_e32 v3, 0x7fffffff, v4
-; GFX-950-NEXT: v_mov_b32_e32 v5, v1
-; GFX-950-NEXT: v_cvt_f32_f64_e32 v1, v[2:3]
-; GFX-950-NEXT: v_cvt_f64_f32_e32 v[6:7], v1
-; GFX-950-NEXT: v_and_b32_e32 v8, 1, v1
-; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], v[2:3], v[6:7]
-; GFX-950-NEXT: v_cmp_nlg_f64_e32 vcc, v[2:3], v[6:7]
-; GFX-950-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v8
+; GFX-950-NEXT: v_cvt_f32_f64_e32 v6, v[2:3]
+; GFX-950-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GFX-950-NEXT: v_and_b32_e32 v7, 1, v6
+; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[2:3]|, |v[4:5]|
+; GFX-950-NEXT: v_cmp_nlg_f64_e32 vcc, v[2:3], v[4:5]
+; GFX-950-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v7
; GFX-950-NEXT: v_cndmask_b32_e64 v2, -1, 1, s[2:3]
-; GFX-950-NEXT: v_add_u32_e32 v2, v1, v2
+; GFX-950-NEXT: v_add_u32_e32 v2, v6, v2
; GFX-950-NEXT: s_or_b64 vcc, vcc, s[0:1]
-; GFX-950-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
-; GFX-950-NEXT: s_brev_b32 s4, 1
-; GFX-950-NEXT: v_and_or_b32 v4, v4, s4, v1
-; GFX-950-NEXT: v_and_b32_e32 v1, 0x7fffffff, v5
-; GFX-950-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
-; GFX-950-NEXT: v_cvt_f64_f32_e32 v[2:3], v6
-; GFX-950-NEXT: v_and_b32_e32 v7, 1, v6
-; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], v[0:1], v[2:3]
+; GFX-950-NEXT: v_cvt_f32_f64_e32 v5, v[0:1]
+; GFX-950-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX-950-NEXT: v_cvt_f64_f32_e32 v[2:3], v5
+; GFX-950-NEXT: v_and_b32_e32 v6, 1, v5
+; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[2:3]|
; GFX-950-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[2:3]
-; GFX-950-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v7
+; GFX-950-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v6
; GFX-950-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[2:3]
-; GFX-950-NEXT: v_add_u32_e32 v0, v6, v0
+; GFX-950-NEXT: v_add_u32_e32 v0, v5, v0
; GFX-950-NEXT: s_or_b64 vcc, vcc, s[0:1]
-; GFX-950-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
-; GFX-950-NEXT: v_and_or_b32 v0, v5, s4, v0
+; GFX-950-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v4
; GFX-950-NEXT: ; return to shader part epilog
%res = fptrunc <2 x double> %src to <2 x bfloat>
@@ -348,42 +338,38 @@ entry:
define amdgpu_ps void @fptrunc_f64_to_bf16(double %a, ptr %out) {
; GFX-942-LABEL: fptrunc_f64_to_bf16:
; GFX-942: ; %bb.0: ; %entry
-; GFX-942-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX-942-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
; GFX-942-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
; GFX-942-NEXT: v_and_b32_e32 v7, 1, v6
-; GFX-942-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
-; GFX-942-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-942-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; GFX-942-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
+; GFX-942-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[4:5]
+; GFX-942-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v7
; GFX-942-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
; GFX-942-NEXT: v_add_u32_e32 v4, v6, v4
-; GFX-942-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GFX-942-NEXT: s_or_b64 vcc, vcc, s[0:1]
; GFX-942-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; GFX-942-NEXT: s_brev_b32 s0, 1
-; GFX-942-NEXT: v_and_or_b32 v5, v1, s0, v4
-; GFX-942-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GFX-942-NEXT: v_bfe_u32 v5, v4, 16, 1
; GFX-942-NEXT: s_movk_i32 s0, 0x7fff
-; GFX-942-NEXT: v_add3_u32 v4, v4, v5, s0
-; GFX-942-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX-942-NEXT: v_add3_u32 v5, v5, v4, s0
+; GFX-942-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX-942-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
; GFX-942-NEXT: s_nop 1
-; GFX-942-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX-942-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
; GFX-942-NEXT: flat_store_short_d16_hi v[2:3], v0
; GFX-942-NEXT: s_endpgm
;
; GFX-950-LABEL: fptrunc_f64_to_bf16:
; GFX-950: ; %bb.0: ; %entry
-; GFX-950-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX-950-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
; GFX-950-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
; GFX-950-NEXT: v_and_b32_e32 v7, 1, v6
-; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
+; GFX-950-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[4:5]
+; GFX-950-NEXT: v_cmp_eq_u32_e64 s[0:1], 1, v7
; GFX-950-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[2:3]
; GFX-950-NEXT: v_add_u32_e32 v0, v6, v0
-; GFX-950-NEXT: s_or_b64 vcc, s[0:1], vcc
+; GFX-950-NEXT: s_or_b64 vcc, vcc, s[0:1]
; GFX-950-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
-; GFX-950-NEXT: s_brev_b32 s0, 1
-; GFX-950-NEXT: v_and_or_b32 v0, v1, s0, v0
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
@@ -396,44 +382,38 @@ entry:
define amdgpu_ps void @fptrunc_f64_to_bf16_neg(double %a, ptr %out) {
; GFX-942-LABEL: fptrunc_f64_to_bf16_neg:
; GFX-942: ; %bb.0: ; %entry
-; GFX-942-NEXT: v_cvt_f32_f64_e64 v7, |v[0:1]|
-; GFX-942-NEXT: v_cvt_f64_f32_e32 v[4:5], v7
-; GFX-942-NEXT: v_and_b32_e32 v8, 1, v7
-; GFX-942-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
-; GFX-942-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-942-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
+; GFX-942-NEXT: v_cvt_f32_f64_e64 v6, -v[0:1]
+; GFX-942-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GFX-942-NEXT: v_and_b32_e32 v7, 1, v6
+; GFX-942-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
+; GFX-942-NEXT: v_cmp_nlg_f64_e64 s[0:1], -v[0:1], v[4:5]
+; GFX-942-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
; GFX-942-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
-; GFX-942-NEXT: v_add_u32_e32 v4, v7, v4
+; GFX-942-NEXT: v_add_u32_e32 v4, v6, v4
; GFX-942-NEXT: s_or_b64 vcc, s[0:1], vcc
-; GFX-942-NEXT: s_brev_b32 s4, 1
-; GFX-942-NEXT: v_xor_b32_e32 v6, 0x80000000, v1
-; GFX-942-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
-; GFX-942-NEXT: v_and_or_b32 v5, v6, s4, v4
-; GFX-942-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GFX-942-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX-942-NEXT: v_bfe_u32 v5, v4, 16, 1
; GFX-942-NEXT: s_movk_i32 s0, 0x7fff
-; GFX-942-NEXT: v_add3_u32 v4, v4, v5, s0
-; GFX-942-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX-942-NEXT: v_add3_u32 v5, v5, v4, s0
+; GFX-942-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX-942-NEXT: v_cmp_u_f64_e64 vcc, -v[0:1], -v[0:1]
; GFX-942-NEXT: s_nop 1
-; GFX-942-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX-942-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
; GFX-942-NEXT: flat_store_short_d16_hi v[2:3], v0
; GFX-942-NEXT: s_endpgm
;
; GFX-950-LABEL: fptrunc_f64_to_bf16_neg:
; GFX-950: ; %bb.0: ; %entry
-; GFX-950-NEXT: v_cvt_f32_f64_e64 v7, |v[0:1]|
-; GFX-950-NEXT: v_cvt_f64_f32_e32 v[4:5], v7
-; GFX-950-NEXT: v_and_b32_e32 v8, 1, v7
-; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
+; GFX-950-NEXT: v_cvt_f32_f64_e64 v6, -v[0:1]
+; GFX-950-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GFX-950-NEXT: v_and_b32_e32 v7, 1, v6
+; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
+; GFX-950-NEXT: v_cmp_nlg_f64_e64 s[0:1], -v[0:1], v[4:5]
+; GFX-950-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
; GFX-950-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[2:3]
-; GFX-950-NEXT: v_add_u32_e32 v0, v7, v0
+; GFX-950-NEXT: v_add_u32_e32 v0, v6, v0
; GFX-950-NEXT: s_or_b64 vcc, s[0:1], vcc
-; GFX-950-NEXT: s_brev_b32 s4, 1
-; GFX-950-NEXT: v_xor_b32_e32 v6, 0x80000000, v1
-; GFX-950-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
-; GFX-950-NEXT: v_and_or_b32 v0, v6, s4, v0
+; GFX-950-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
@@ -447,44 +427,38 @@ entry:
define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
; GFX-942-LABEL: fptrunc_f64_to_bf16_abs:
; GFX-942: ; %bb.0: ; %entry
-; GFX-942-NEXT: v_cvt_f32_f64_e64 v7, |v[0:1]|
-; GFX-942-NEXT: v_cvt_f64_f32_e32 v[4:5], v7
-; GFX-942-NEXT: v_and_b32_e32 v8, 1, v7
-; GFX-942-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GFX-942-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX-942-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GFX-942-NEXT: v_and_b32_e32 v7, 1, v6
+; GFX-942-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
; GFX-942-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-942-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
+; GFX-942-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
; GFX-942-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[2:3]
-; GFX-942-NEXT: v_add_u32_e32 v4, v7, v4
+; GFX-942-NEXT: v_add_u32_e32 v4, v6, v4
; GFX-942-NEXT: s_or_b64 vcc, s[0:1], vcc
-; GFX-942-NEXT: v_and_b32_e32 v6, 0x7fffffff, v1
-; GFX-942-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
-; GFX-942-NEXT: s_brev_b32 s0, 1
-; GFX-942-NEXT: v_and_or_b32 v5, v6, s0, v4
-; GFX-942-NEXT: v_bfe_u32 v4, v4, 16, 1
+; GFX-942-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX-942-NEXT: v_bfe_u32 v5, v4, 16, 1
; GFX-942-NEXT: s_movk_i32 s0, 0x7fff
-; GFX-942-NEXT: v_add3_u32 v4, v4, v5, s0
-; GFX-942-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX-942-NEXT: v_add3_u32 v5, v5, v4, s0
+; GFX-942-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX-942-NEXT: v_cmp_u_f64_e64 vcc, |v[0:1]|, |v[0:1]|
; GFX-942-NEXT: s_nop 1
-; GFX-942-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX-942-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
; GFX-942-NEXT: flat_store_short_d16_hi v[2:3], v0
; GFX-942-NEXT: s_endpgm
;
; GFX-950-LABEL: fptrunc_f64_to_bf16_abs:
; GFX-950: ; %bb.0: ; %entry
-; GFX-950-NEXT: v_cvt_f32_f64_e64 v7, |v[0:1]|
-; GFX-950-NEXT: v_cvt_f64_f32_e32 v[4:5], v7
-; GFX-950-NEXT: v_and_b32_e32 v8, 1, v7
-; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
+; GFX-950-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX-950-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
+; GFX-950-NEXT: v_and_b32_e32 v7, 1, v6
+; GFX-950-NEXT: v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[4:5]|
; GFX-950-NEXT: v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
-; GFX-950-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
+; GFX-950-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
; GFX-950-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[2:3]
-; GFX-950-NEXT: v_add_u32_e32 v0, v7, v0
+; GFX-950-NEXT: v_add_u32_e32 v0, v6, v0
; GFX-950-NEXT: s_or_b64 vcc, s[0:1], vcc
-; GFX-950-NEXT: v_and_b32_e32 v6, 0x7fffffff, v1
-; GFX-950-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
-; GFX-950-NEXT: s_brev_b32 s0, 1
-; GFX-950-NEXT: v_and_or_b32 v0, v6, s0, v0
+; GFX-950-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
; GFX-950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
; GFX-950-NEXT: flat_store_short v[2:3], v0
; GFX-950-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 8582b61bbbd82..d845cadab6f7e 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -2287,24 +2287,22 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
-; GFX8-NEXT: v_and_b32_e32 v7, 0x80000000, v1
+; GFX8-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
; GFX8-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
-; GFX8-NEXT: v_and_b32_e32 v8, 1, v6
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
-; GFX8-NEXT: v_cmp_gt_f64_e64 s[4:5], |v[0:1]|, v[4:5]
-; GFX8-NEXT: v_cmp_nlg_f64_e64 s[6:7], |v[0:1]|, v[4:5]
-; GFX8-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[4:5]
-; GFX8-NEXT: v_add_u32_e64 v4, s[4:5], v6, v4
-; GFX8-NEXT: s_or_b64 vcc, s[6:7], vcc
+; GFX8-NEXT: v_and_b32_e32 v7, 1, v6
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v7
+; GFX8-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, |v[4:5]|
+; GFX8-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[6:7]
+; GFX8-NEXT: v_add_u32_e64 v4, s[6:7], v6, v4
+; GFX8-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; GFX8-NEXT: v_or_b32_e32 v5, v4, v7
-; GFX8-NEXT: v_bfe_u32 v4, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v5
+; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX8-NEXT: flat_store_short v[2:3], v0
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -2314,25 +2312,23 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
-; GFX9-NEXT: s_brev_b32 s8, 1
-; GFX9-NEXT: s_movk_i32 s9, 0x7fff
+; GFX9-NEXT: s_movk_i32 s8, 0x7fff
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX9-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
; GFX9-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
; GFX9-NEXT: v_and_b32_e32 v7, 1, v6
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
-; GFX9-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, v[4:5]
-; GFX9-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[0:1]|, v[4:5]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v7
+; GFX9-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, |v[4:5]|
+; GFX9-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[4:5]
; GFX9-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[6:7]
; GFX9-NEXT: v_add_u32_e32 v4, v6, v4
-; GFX9-NEXT: s_or_b64 vcc, s[4:5], vcc
+; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; GFX9-NEXT: v_and_or_b32 v5, v1, s8, v4
-; GFX9-NEXT: v_bfe_u32 v4, v4, 16, 1
-; GFX9-NEXT: v_add3_u32 v4, v4, v5, s9
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v4
+; GFX9-NEXT: v_add3_u32 v4, v5, v4, s8
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc
; GFX9-NEXT: global_store_short_d16_hi v[2:3], v0, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -2342,22 +2338,21 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; GFX10-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
; GFX10-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
; GFX10-NEXT: v_and_b32_e32 v7, 1, v6
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v7
-; GFX10-NEXT: v_cmp_gt_f64_e64 s5, |v[0:1]|, v[4:5]
-; GFX10-NEXT: v_cmp_nlg_f64_e64 s4, |v[0:1]|, v[4:5]
-; GFX10-NEXT: v_cndmask_b32_e64 v4, -1, 1, s5
-; GFX10-NEXT: s_or_b32 vcc_lo, s4, vcc_lo
+; GFX10-NEXT: v_cmp_gt_f64_e64 s4, |v[0:1]|, |v[4:5]|
+; GFX10-NEXT: v_cmp_nlg_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e64 v4, -1, 1, s4
+; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 1, v7
; GFX10-NEXT: v_add_nc_u32_e32 v4, v6, v4
+; GFX10-NEXT: s_or_b32 vcc_lo, vcc_lo, s4
; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[0:1]
-; GFX10-NEXT: v_and_or_b32 v5, 0x80000000, v1, v4
-; GFX10-NEXT: v_bfe_u32 v4, v4, 16, 1
-; GFX10-NEXT: v_add3_u32 v4, v4, v5, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v5
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc_lo
; GFX10-NEXT: global_store_short_d16_hi v[2:3], v0, off
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
@@ -2366,27 +2361,26 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: global_load_b64 v[0:1], v[0:1], off
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
; GFX11-NEXT: v_and_b32_e32 v7, 1, v6
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_gt_f64_e64 s1, |v[0:1]|, v[4:5]
-; GFX11-NEXT: v_cmp_nlg_f64_e64 s0, |v[0:1]|, v[4:5]
-; GFX11-NEXT: v_cndmask_b32_e64 v4, -1, 1, s1
-; GFX11-NEXT: s_or_b32 vcc_lo, s0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_f64_e64 s0, |v[0:1]|, |v[4:5]|
+; GFX11-NEXT: v_cmp_nlg_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e64 v4, -1, 1, s0
+; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 1, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v4
+; GFX11-NEXT: s_or_b32 vcc_lo, vcc_lo, s0
; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
; GFX11-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[0:1]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_and_or_b32 v5, 0x80000000, v1, v4
-; GFX11-NEXT: v_bfe_u32 v4, v4, 16, 1
-; GFX11-NEXT: v_add3_u32 v4, v4, v5, 0x7fff
-; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc_lo
; GFX11-NEXT: global_store_d16_hi_b16 v[2:3], v0, off
; GFX11-NEXT: s_setpc_b64 s[30:31]
%val = load double, ptr addrspace(1) %in
diff --git a/llvm/test/CodeGen/AMDGPU/fp_trunc_store_fp64_to_bf16.ll b/llvm/test/CodeGen/AMDGPU/fp_trunc_store_fp64_to_bf16.ll
index d824763c22e27..c685ad75b47f7 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_trunc_store_fp64_to_bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_trunc_store_fp64_to_bf16.ll
@@ -5,24 +5,22 @@ define void @scalar(double %num, ptr addrspace(1) %p) {
; CHECK-LABEL: scalar:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
+; CHECK-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
; CHECK-NEXT: v_and_b32_e32 v7, 1, v6
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, v[4:5]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[0:1]|, v[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, |v[4:5]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[4:5]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v7
; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v4, v6, v4
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; CHECK-NEXT: s_brev_b32 s4, 1
-; CHECK-NEXT: v_and_or_b32 v5, v1, s4, v4
-; CHECK-NEXT: v_bfe_u32 v4, v4, 16, 1
+; CHECK-NEXT: v_bfe_u32 v5, v4, 16, 1
; CHECK-NEXT: s_movk_i32 s4, 0x7fff
-; CHECK-NEXT: v_add3_u32 v4, v4, v5, s4
-; CHECK-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; CHECK-NEXT: v_add3_u32 v5, v5, v4, s4
+; CHECK-NEXT: v_or_b32_e32 v4, 0x400000, v4
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
; CHECK-NEXT: global_store_short_d16_hi v[2:3], v0, off
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -36,40 +34,37 @@ define void @v2(<2 x double> %num, ptr addrspace(1) %p) {
; CHECK-LABEL: v2:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cvt_f32_f64_e64 v8, |v[0:1]|
+; CHECK-NEXT: v_cvt_f32_f64_e32 v8, v[0:1]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[6:7], v8
; CHECK-NEXT: v_and_b32_e32 v9, 1, v8
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, v[6:7]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[0:1]|, v[6:7]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v9
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, |v[6:7]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[6:7]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v9
; CHECK-NEXT: v_cndmask_b32_e64 v6, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v6, v8, v6
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
-; CHECK-NEXT: s_brev_b32 s8, 1
-; CHECK-NEXT: v_and_or_b32 v7, v1, s8, v6
-; CHECK-NEXT: v_bfe_u32 v6, v6, 16, 1
-; CHECK-NEXT: s_movk_i32 s9, 0x7fff
-; CHECK-NEXT: v_add3_u32 v6, v6, v7, s9
-; CHECK-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; CHECK-NEXT: v_bfe_u32 v7, v6, 16, 1
+; CHECK-NEXT: s_movk_i32 s8, 0x7fff
+; CHECK-NEXT: v_add3_u32 v7, v7, v6, s8
+; CHECK-NEXT: v_or_b32_e32 v6, 0x400000, v6
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; CHECK-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v7, |v[2:3]|
+; CHECK-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v7, v[2:3]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v7
; CHECK-NEXT: v_and_b32_e32 v8, 1, v7
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[2:3]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[2:3]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[2:3]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v8
; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v0, v7, v0
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v3, s8, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s9
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s8
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; CHECK-NEXT: s_mov_b32 s4, 0x7060302
; CHECK-NEXT: v_perm_b32 v0, v0, v6, s4
; CHECK-NEXT: global_store_dword v[4:5], v0, off
@@ -85,58 +80,54 @@ define void @v3(<3 x double> %num, ptr addrspace(1) %p) {
; CHECK-LABEL: v3:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cvt_f32_f64_e64 v10, |v[0:1]|
+; CHECK-NEXT: v_cvt_f32_f64_e32 v10, v[0:1]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[8:9], v10
; CHECK-NEXT: v_and_b32_e32 v11, 1, v10
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, v[8:9]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[0:1]|, v[8:9]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v11
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, |v[8:9]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[8:9]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v11
; CHECK-NEXT: v_cndmask_b32_e64 v8, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v8, v10, v8
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
-; CHECK-NEXT: s_brev_b32 s8, 1
-; CHECK-NEXT: v_and_or_b32 v9, v1, s8, v8
-; CHECK-NEXT: v_bfe_u32 v8, v8, 16, 1
-; CHECK-NEXT: s_movk_i32 s9, 0x7fff
-; CHECK-NEXT: v_add3_u32 v8, v8, v9, s9
-; CHECK-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; CHECK-NEXT: v_bfe_u32 v9, v8, 16, 1
+; CHECK-NEXT: s_movk_i32 s8, 0x7fff
+; CHECK-NEXT: v_add3_u32 v9, v9, v8, s8
+; CHECK-NEXT: v_or_b32_e32 v8, 0x400000, v8
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; CHECK-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v9, |v[2:3]|
+; CHECK-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v9, v[2:3]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v9
; CHECK-NEXT: v_and_b32_e32 v10, 1, v9
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[2:3]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[2:3]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v10
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[2:3]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v10
; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v0, v9, v0
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v9, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v3, s8, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s9
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s8
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; CHECK-NEXT: s_mov_b32 s4, 0x7060302
-; CHECK-NEXT: v_cvt_f32_f64_e64 v3, |v[4:5]|
+; CHECK-NEXT: v_cvt_f32_f64_e32 v3, v[4:5]
; CHECK-NEXT: v_perm_b32 v2, v0, v8, s4
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v3
; CHECK-NEXT: v_and_b32_e32 v8, 1, v3
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[4:5]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[4:5]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[4:5]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[4:5], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v8
; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v0, v3, v0
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v5, s8, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s9
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s8
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; CHECK-NEXT: global_store_short_d16_hi v[6:7], v0, off offset:4
; CHECK-NEXT: global_store_dword v[6:7], v2, off
; CHECK-NEXT: s_waitcnt vmcnt(0)
@@ -151,75 +142,70 @@ define void @v4(<4 x double> %num, ptr addrspace(1) %p) {
; CHECK-LABEL: v4:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cvt_f32_f64_e64 v12, |v[4:5]|
+; CHECK-NEXT: v_cvt_f32_f64_e32 v12, v[4:5]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[10:11], v12
; CHECK-NEXT: v_and_b32_e32 v13, 1, v12
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[4:5]|, v[10:11]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[4:5]|, v[10:11]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v13
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[4:5]|, |v[10:11]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[4:5], v[10:11]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v13
; CHECK-NEXT: v_cndmask_b32_e64 v10, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v10, v12, v10
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v10, v10, v12, vcc
-; CHECK-NEXT: s_brev_b32 s8, 1
-; CHECK-NEXT: v_and_or_b32 v11, v5, s8, v10
-; CHECK-NEXT: v_bfe_u32 v10, v10, 16, 1
-; CHECK-NEXT: s_movk_i32 s9, 0x7fff
-; CHECK-NEXT: v_add3_u32 v10, v10, v11, s9
-; CHECK-NEXT: v_or_b32_e32 v11, 0x400000, v11
+; CHECK-NEXT: v_bfe_u32 v11, v10, 16, 1
+; CHECK-NEXT: s_movk_i32 s8, 0x7fff
+; CHECK-NEXT: v_add3_u32 v11, v11, v10, s8
+; CHECK-NEXT: v_or_b32_e32 v10, 0x400000, v10
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v11, |v[6:7]|
+; CHECK-NEXT: v_cndmask_b32_e32 v10, v11, v10, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v11, v[6:7]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[4:5], v11
; CHECK-NEXT: v_and_b32_e32 v12, 1, v11
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[6:7]|, v[4:5]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[6:7]|, v[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v12
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[6:7]|, |v[4:5]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[6:7], v[4:5]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v12
; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v4, v11, v4
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v11, vcc
-; CHECK-NEXT: v_and_or_b32 v5, v7, s8, v4
-; CHECK-NEXT: v_bfe_u32 v4, v4, 16, 1
-; CHECK-NEXT: v_add3_u32 v4, v4, v5, s9
-; CHECK-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; CHECK-NEXT: v_bfe_u32 v5, v4, 16, 1
+; CHECK-NEXT: v_add3_u32 v5, v5, v4, s8
+; CHECK-NEXT: v_or_b32_e32 v4, 0x400000, v4
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[6:7], v[6:7]
-; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; CHECK-NEXT: s_mov_b32 s10, 0x7060302
-; CHECK-NEXT: v_perm_b32 v5, v4, v10, s10
-; CHECK-NEXT: v_cvt_f32_f64_e64 v4, |v[0:1]|
+; CHECK-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; CHECK-NEXT: s_mov_b32 s9, 0x7060302
+; CHECK-NEXT: v_perm_b32 v5, v4, v10, s9
+; CHECK-NEXT: v_cvt_f32_f64_e32 v4, v[0:1]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[6:7], v4
; CHECK-NEXT: v_and_b32_e32 v10, 1, v4
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, v[6:7]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[0:1]|, v[6:7]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v10
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, |v[6:7]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[6:7]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v10
; CHECK-NEXT: v_cndmask_b32_e64 v6, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v6, v4, v6
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
-; CHECK-NEXT: v_and_or_b32 v6, v1, s8, v4
-; CHECK-NEXT: v_bfe_u32 v4, v4, 16, 1
-; CHECK-NEXT: v_add3_u32 v4, v4, v6, s9
-; CHECK-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; CHECK-NEXT: v_bfe_u32 v6, v4, 16, 1
+; CHECK-NEXT: v_add3_u32 v6, v6, v4, s8
+; CHECK-NEXT: v_or_b32_e32 v4, 0x400000, v4
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v6, |v[2:3]|
+; CHECK-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v6, v[2:3]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v6
; CHECK-NEXT: v_and_b32_e32 v7, 1, v6
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[2:3]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[2:3]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[2:3]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v7
; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v0, v6, v0
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v3, s8, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s9
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s8
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; CHECK-NEXT: v_perm_b32 v4, v0, v4, s10
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: v_perm_b32 v4, v0, v4, s9
; CHECK-NEXT: global_store_dwordx2 v[8:9], v[4:5], off
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -233,141 +219,132 @@ define void @v8(<8 x double> %num, ptr addrspace(1) %p) {
; CHECK-LABEL: v8:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cvt_f32_f64_e64 v20, |v[12:13]|
+; CHECK-NEXT: v_cvt_f32_f64_e32 v20, v[12:13]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[18:19], v20
; CHECK-NEXT: v_and_b32_e32 v21, 1, v20
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[12:13]|, v[18:19]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[12:13]|, v[18:19]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v21
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[12:13]|, |v[18:19]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[12:13], v[18:19]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v21
; CHECK-NEXT: v_cndmask_b32_e64 v18, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v18, v20, v18
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v18, v18, v20, vcc
-; CHECK-NEXT: s_brev_b32 s8, 1
-; CHECK-NEXT: v_and_or_b32 v19, v13, s8, v18
-; CHECK-NEXT: v_bfe_u32 v18, v18, 16, 1
-; CHECK-NEXT: s_movk_i32 s9, 0x7fff
-; CHECK-NEXT: v_add3_u32 v18, v18, v19, s9
-; CHECK-NEXT: v_or_b32_e32 v19, 0x400000, v19
+; CHECK-NEXT: v_bfe_u32 v19, v18, 16, 1
+; CHECK-NEXT: s_movk_i32 s8, 0x7fff
+; CHECK-NEXT: v_add3_u32 v19, v19, v18, s8
+; CHECK-NEXT: v_or_b32_e32 v18, 0x400000, v18
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[12:13], v[12:13]
-; CHECK-NEXT: v_cndmask_b32_e32 v18, v18, v19, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v19, |v[14:15]|
+; CHECK-NEXT: v_cndmask_b32_e32 v18, v19, v18, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v19, v[14:15]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[12:13], v19
; CHECK-NEXT: v_and_b32_e32 v20, 1, v19
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[14:15]|, v[12:13]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[14:15]|, v[12:13]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v20
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[14:15]|, |v[12:13]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[14:15], v[12:13]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v20
; CHECK-NEXT: v_cndmask_b32_e64 v12, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v12, v19, v12
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v12, v12, v19, vcc
-; CHECK-NEXT: v_and_or_b32 v13, v15, s8, v12
-; CHECK-NEXT: v_bfe_u32 v12, v12, 16, 1
-; CHECK-NEXT: v_add3_u32 v12, v12, v13, s9
-; CHECK-NEXT: v_or_b32_e32 v13, 0x400000, v13
+; CHECK-NEXT: v_bfe_u32 v13, v12, 16, 1
+; CHECK-NEXT: v_add3_u32 v13, v13, v12, s8
+; CHECK-NEXT: v_or_b32_e32 v12, 0x400000, v12
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[14:15], v[14:15]
-; CHECK-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
-; CHECK-NEXT: s_mov_b32 s10, 0x7060302
-; CHECK-NEXT: v_perm_b32 v13, v12, v18, s10
-; CHECK-NEXT: v_cvt_f32_f64_e64 v12, |v[8:9]|
+; CHECK-NEXT: v_cndmask_b32_e32 v12, v13, v12, vcc
+; CHECK-NEXT: s_mov_b32 s9, 0x7060302
+; CHECK-NEXT: v_perm_b32 v13, v12, v18, s9
+; CHECK-NEXT: v_cvt_f32_f64_e32 v12, v[8:9]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[14:15], v12
; CHECK-NEXT: v_and_b32_e32 v18, 1, v12
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[8:9]|, v[14:15]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[8:9]|, v[14:15]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v18
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[8:9]|, |v[14:15]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[8:9], v[14:15]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v18
; CHECK-NEXT: v_cndmask_b32_e64 v14, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v14, v12, v14
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v12, v14, v12, vcc
-; CHECK-NEXT: v_and_or_b32 v14, v9, s8, v12
-; CHECK-NEXT: v_bfe_u32 v12, v12, 16, 1
-; CHECK-NEXT: v_add3_u32 v12, v12, v14, s9
-; CHECK-NEXT: v_or_b32_e32 v14, 0x400000, v14
+; CHECK-NEXT: v_bfe_u32 v14, v12, 16, 1
+; CHECK-NEXT: v_add3_u32 v14, v14, v12, s8
+; CHECK-NEXT: v_or_b32_e32 v12, 0x400000, v12
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[8:9]
-; CHECK-NEXT: v_cndmask_b32_e32 v12, v12, v14, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v14, |v[10:11]|
+; CHECK-NEXT: v_cndmask_b32_e32 v12, v14, v12, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v14, v[10:11]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[8:9], v14
; CHECK-NEXT: v_and_b32_e32 v15, 1, v14
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[10:11]|, v[8:9]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[10:11]|, v[8:9]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v15
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[10:11]|, |v[8:9]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[10:11], v[8:9]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v15
; CHECK-NEXT: v_cndmask_b32_e64 v8, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v8, v14, v8
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v8, v8, v14, vcc
-; CHECK-NEXT: v_and_or_b32 v9, v11, s8, v8
-; CHECK-NEXT: v_bfe_u32 v8, v8, 16, 1
-; CHECK-NEXT: v_add3_u32 v8, v8, v9, s9
-; CHECK-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; CHECK-NEXT: v_bfe_u32 v9, v8, 16, 1
+; CHECK-NEXT: v_add3_u32 v9, v9, v8, s8
+; CHECK-NEXT: v_or_b32_e32 v8, 0x400000, v8
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[10:11], v[10:11]
-; CHECK-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v10, |v[4:5]|
-; CHECK-NEXT: v_perm_b32 v12, v8, v12, s10
+; CHECK-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v10, v[4:5]
+; CHECK-NEXT: v_perm_b32 v12, v8, v12, s9
; CHECK-NEXT: v_cvt_f64_f32_e32 v[8:9], v10
; CHECK-NEXT: v_and_b32_e32 v11, 1, v10
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[4:5]|, v[8:9]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[4:5]|, v[8:9]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v11
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[4:5]|, |v[8:9]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[4:5], v[8:9]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v11
; CHECK-NEXT: v_cndmask_b32_e64 v8, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v8, v10, v8
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
-; CHECK-NEXT: v_and_or_b32 v9, v5, s8, v8
-; CHECK-NEXT: v_bfe_u32 v8, v8, 16, 1
-; CHECK-NEXT: v_add3_u32 v8, v8, v9, s9
-; CHECK-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; CHECK-NEXT: v_bfe_u32 v9, v8, 16, 1
+; CHECK-NEXT: v_add3_u32 v9, v9, v8, s8
+; CHECK-NEXT: v_or_b32_e32 v8, 0x400000, v8
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v9, |v[6:7]|
+; CHECK-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v9, v[6:7]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[4:5], v9
; CHECK-NEXT: v_and_b32_e32 v10, 1, v9
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[6:7]|, v[4:5]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[6:7]|, v[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v10
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[6:7]|, |v[4:5]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[6:7], v[4:5]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v10
; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v4, v9, v4
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
-; CHECK-NEXT: v_and_or_b32 v5, v7, s8, v4
-; CHECK-NEXT: v_bfe_u32 v4, v4, 16, 1
-; CHECK-NEXT: v_add3_u32 v4, v4, v5, s9
-; CHECK-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; CHECK-NEXT: v_bfe_u32 v5, v4, 16, 1
+; CHECK-NEXT: v_add3_u32 v5, v5, v4, s8
+; CHECK-NEXT: v_or_b32_e32 v4, 0x400000, v4
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[6:7], v[6:7]
-; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
-; CHECK-NEXT: v_perm_b32 v11, v4, v8, s10
+; CHECK-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
+; CHECK-NEXT: v_perm_b32 v11, v4, v8, s9
; CHECK-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
; CHECK-NEXT: v_and_b32_e32 v7, 1, v6
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, v[4:5]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[0:1]|, v[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[0:1]|, |v[4:5]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[4:5]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v7
; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v4, v6, v4
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; CHECK-NEXT: v_and_or_b32 v5, v1, s8, v4
-; CHECK-NEXT: v_bfe_u32 v4, v4, 16, 1
-; CHECK-NEXT: v_add3_u32 v4, v4, v5, s9
-; CHECK-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; CHECK-NEXT: v_bfe_u32 v5, v4, 16, 1
+; CHECK-NEXT: v_add3_u32 v5, v5, v4, s8
+; CHECK-NEXT: v_or_b32_e32 v4, 0x400000, v4
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v5, |v[2:3]|
+; CHECK-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v5, v[2:3]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v5
; CHECK-NEXT: v_and_b32_e32 v6, 1, v5
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[2:3]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[2:3]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[2:3]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v6
; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v0, v5, v0
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v3, s8, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s9
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s8
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; CHECK-NEXT: v_perm_b32 v10, v0, v4, s10
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: v_perm_b32 v10, v0, v4, s9
; CHECK-NEXT: global_store_dwordx4 v[16:17], v[10:13], off
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -384,274 +361,257 @@ define void @v16(<16 x double> %num, ptr addrspace(1) %p) {
; CHECK-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; CHECK-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; CHECK-NEXT: v_cvt_f32_f64_e64 v36, |v[12:13]|
+; CHECK-NEXT: v_cvt_f32_f64_e32 v36, v[12:13]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[34:35], v36
; CHECK-NEXT: v_and_b32_e32 v37, 1, v36
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[12:13]|, v[34:35]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[4:5], |v[12:13]|, v[34:35]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v37
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[6:7], |v[12:13]|, |v[34:35]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[12:13], v[34:35]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v37
; CHECK-NEXT: v_cndmask_b32_e64 v34, -1, 1, s[6:7]
; CHECK-NEXT: v_add_u32_e32 v34, v36, v34
-; CHECK-NEXT: s_or_b64 vcc, s[4:5], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v34, v34, v36, vcc
-; CHECK-NEXT: s_brev_b32 s4, 1
-; CHECK-NEXT: v_and_or_b32 v35, v13, s4, v34
-; CHECK-NEXT: v_bfe_u32 v34, v34, 16, 1
-; CHECK-NEXT: s_movk_i32 s5, 0x7fff
-; CHECK-NEXT: v_add3_u32 v34, v34, v35, s5
-; CHECK-NEXT: v_or_b32_e32 v35, 0x400000, v35
+; CHECK-NEXT: v_bfe_u32 v35, v34, 16, 1
+; CHECK-NEXT: s_movk_i32 s6, 0x7fff
+; CHECK-NEXT: v_add3_u32 v35, v35, v34, s6
+; CHECK-NEXT: v_or_b32_e32 v34, 0x400000, v34
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[12:13], v[12:13]
-; CHECK-NEXT: v_cndmask_b32_e32 v34, v34, v35, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v35, |v[14:15]|
+; CHECK-NEXT: v_cndmask_b32_e32 v34, v35, v34, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v35, v[14:15]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[12:13], v35
; CHECK-NEXT: v_and_b32_e32 v36, 1, v35
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[14:15]|, v[12:13]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[6:7], |v[14:15]|, v[12:13]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v36
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[14:15]|, |v[12:13]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[14:15], v[12:13]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v36
; CHECK-NEXT: v_cndmask_b32_e64 v12, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v12, v35, v12
-; CHECK-NEXT: s_or_b64 vcc, s[6:7], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v12, v12, v35, vcc
-; CHECK-NEXT: v_and_or_b32 v13, v15, s4, v12
-; CHECK-NEXT: v_bfe_u32 v12, v12, 16, 1
-; CHECK-NEXT: v_add3_u32 v12, v12, v13, s5
-; CHECK-NEXT: v_or_b32_e32 v13, 0x400000, v13
+; CHECK-NEXT: v_bfe_u32 v13, v12, 16, 1
+; CHECK-NEXT: v_add3_u32 v13, v13, v12, s6
+; CHECK-NEXT: v_or_b32_e32 v12, 0x400000, v12
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[14:15], v[14:15]
-; CHECK-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
-; CHECK-NEXT: s_mov_b32 s6, 0x7060302
-; CHECK-NEXT: v_perm_b32 v13, v12, v34, s6
-; CHECK-NEXT: v_cvt_f32_f64_e64 v12, |v[8:9]|
+; CHECK-NEXT: v_cndmask_b32_e32 v12, v13, v12, vcc
+; CHECK-NEXT: s_mov_b32 s7, 0x7060302
+; CHECK-NEXT: v_perm_b32 v13, v12, v34, s7
+; CHECK-NEXT: v_cvt_f32_f64_e32 v12, v[8:9]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[14:15], v12
; CHECK-NEXT: v_and_b32_e32 v34, 1, v12
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[8:9]|, v[14:15]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[8:9]|, v[14:15]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v34
-; CHECK-NEXT: v_cndmask_b32_e64 v14, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[8:9]|, |v[14:15]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[8:9], v[14:15]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v34
+; CHECK-NEXT: v_cndmask_b32_e64 v14, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v14, v12, v14
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v12, v14, v12, vcc
-; CHECK-NEXT: v_and_or_b32 v14, v9, s4, v12
-; CHECK-NEXT: v_bfe_u32 v12, v12, 16, 1
-; CHECK-NEXT: v_add3_u32 v12, v12, v14, s5
-; CHECK-NEXT: v_or_b32_e32 v14, 0x400000, v14
+; CHECK-NEXT: v_bfe_u32 v14, v12, 16, 1
+; CHECK-NEXT: v_add3_u32 v14, v14, v12, s6
+; CHECK-NEXT: v_or_b32_e32 v12, 0x400000, v12
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[8:9], v[8:9]
-; CHECK-NEXT: v_cndmask_b32_e32 v12, v12, v14, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v14, |v[10:11]|
+; CHECK-NEXT: v_cndmask_b32_e32 v12, v14, v12, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v14, v[10:11]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[8:9], v14
; CHECK-NEXT: v_and_b32_e32 v15, 1, v14
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[10:11]|, v[8:9]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[10:11]|, v[8:9]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v15
-; CHECK-NEXT: v_cndmask_b32_e64 v8, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[10:11]|, |v[8:9]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[10:11], v[8:9]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v15
+; CHECK-NEXT: v_cndmask_b32_e64 v8, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v8, v14, v8
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v8, v8, v14, vcc
-; CHECK-NEXT: v_and_or_b32 v9, v11, s4, v8
-; CHECK-NEXT: v_bfe_u32 v8, v8, 16, 1
-; CHECK-NEXT: v_add3_u32 v8, v8, v9, s5
-; CHECK-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; CHECK-NEXT: v_bfe_u32 v9, v8, 16, 1
+; CHECK-NEXT: v_add3_u32 v9, v9, v8, s6
+; CHECK-NEXT: v_or_b32_e32 v8, 0x400000, v8
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[10:11], v[10:11]
-; CHECK-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v10, |v[4:5]|
-; CHECK-NEXT: v_perm_b32 v12, v8, v12, s6
+; CHECK-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v10, v[4:5]
+; CHECK-NEXT: v_perm_b32 v12, v8, v12, s7
; CHECK-NEXT: v_cvt_f64_f32_e32 v[8:9], v10
; CHECK-NEXT: v_and_b32_e32 v11, 1, v10
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[4:5]|, v[8:9]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[4:5]|, v[8:9]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v11
-; CHECK-NEXT: v_cndmask_b32_e64 v8, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[4:5]|, |v[8:9]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[4:5], v[8:9]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v11
+; CHECK-NEXT: v_cndmask_b32_e64 v8, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v8, v10, v8
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
-; CHECK-NEXT: v_and_or_b32 v9, v5, s4, v8
-; CHECK-NEXT: v_bfe_u32 v8, v8, 16, 1
-; CHECK-NEXT: v_add3_u32 v8, v8, v9, s5
-; CHECK-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; CHECK-NEXT: v_bfe_u32 v9, v8, 16, 1
+; CHECK-NEXT: v_add3_u32 v9, v9, v8, s6
+; CHECK-NEXT: v_or_b32_e32 v8, 0x400000, v8
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v9, |v[6:7]|
+; CHECK-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v9, v[6:7]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[4:5], v9
; CHECK-NEXT: v_and_b32_e32 v10, 1, v9
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[6:7]|, v[4:5]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[6:7]|, v[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v10
-; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[6:7]|, |v[4:5]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[6:7], v[4:5]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v10
+; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v4, v9, v4
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
-; CHECK-NEXT: v_and_or_b32 v5, v7, s4, v4
-; CHECK-NEXT: v_bfe_u32 v4, v4, 16, 1
-; CHECK-NEXT: v_add3_u32 v4, v4, v5, s5
-; CHECK-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; CHECK-NEXT: v_bfe_u32 v5, v4, 16, 1
+; CHECK-NEXT: v_add3_u32 v5, v5, v4, s6
+; CHECK-NEXT: v_or_b32_e32 v4, 0x400000, v4
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[6:7], v[6:7]
-; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v6, |v[0:1]|
-; CHECK-NEXT: v_perm_b32 v11, v4, v8, s6
+; CHECK-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v6, v[0:1]
+; CHECK-NEXT: v_perm_b32 v11, v4, v8, s7
; CHECK-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
; CHECK-NEXT: v_and_b32_e32 v7, 1, v6
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[0:1]|, v[4:5]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[0:1]|, v[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
-; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[0:1]|, |v[4:5]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[0:1], v[4:5]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v7
+; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v4, v6, v4
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; CHECK-NEXT: v_and_or_b32 v5, v1, s4, v4
-; CHECK-NEXT: v_bfe_u32 v4, v4, 16, 1
-; CHECK-NEXT: v_add3_u32 v4, v4, v5, s5
-; CHECK-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; CHECK-NEXT: v_bfe_u32 v5, v4, 16, 1
+; CHECK-NEXT: v_add3_u32 v5, v5, v4, s6
+; CHECK-NEXT: v_or_b32_e32 v4, 0x400000, v4
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v5, |v[2:3]|
+; CHECK-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v5, v[2:3]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v5
; CHECK-NEXT: v_and_b32_e32 v6, 1, v5
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[2:3]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[2:3]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6
-; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[2:3]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v6
+; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v0, v5, v0
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v3, s4, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s5
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s6
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v2, |v[28:29]|
-; CHECK-NEXT: v_perm_b32 v10, v0, v4, s6
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v2, v[28:29]
+; CHECK-NEXT: v_perm_b32 v10, v0, v4, s7
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v2
; CHECK-NEXT: v_and_b32_e32 v3, 1, v2
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[28:29]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[28:29]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v3
-; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[28:29]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[28:29], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v3
+; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v0, v2, v0
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v29, s4, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s5
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s6
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[28:29], v[28:29]
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cvt_f32_f64_e64 v3, |v[30:31]|
-; CHECK-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v3, v[30:31]
+; CHECK-NEXT: v_cndmask_b32_e32 v2, v1, v0, vcc
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v3
; CHECK-NEXT: v_and_b32_e32 v4, 1, v3
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[30:31]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[30:31]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4
-; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[30:31]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[30:31], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v4
+; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v0, v3, v0
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v31, s4, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s5
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s6
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[30:31], v[30:31]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; CHECK-NEXT: v_perm_b32 v3, v0, v2, s6
-; CHECK-NEXT: v_cvt_f32_f64_e64 v2, |v[24:25]|
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: v_perm_b32 v3, v0, v2, s7
+; CHECK-NEXT: v_cvt_f32_f64_e32 v2, v[24:25]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v2
; CHECK-NEXT: v_and_b32_e32 v4, 1, v2
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[24:25]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[24:25]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4
-; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[24:25]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[24:25], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v4
+; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v0, v2, v0
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v25, s4, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s5
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s6
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[24:25], v[24:25]
-; CHECK-NEXT: v_cvt_f32_f64_e64 v4, |v[26:27]|
-; CHECK-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v4, v[26:27]
+; CHECK-NEXT: v_cndmask_b32_e32 v2, v1, v0, vcc
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v4
; CHECK-NEXT: v_and_b32_e32 v5, 1, v4
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[26:27]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[26:27]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v5
-; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[26:27]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[26:27], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v5
+; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v0, v4, v0
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v27, s4, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s5
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s6
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[26:27], v[26:27]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; CHECK-NEXT: v_cvt_f32_f64_e64 v4, |v[20:21]|
-; CHECK-NEXT: v_perm_b32 v2, v0, v2, s6
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v4, v[20:21]
+; CHECK-NEXT: v_perm_b32 v2, v0, v2, s7
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v4
; CHECK-NEXT: v_and_b32_e32 v5, 1, v4
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[20:21]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[20:21]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v5
-; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[20:21]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[20:21], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v5
+; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v0, v4, v0
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v21, s4, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s5
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s6
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[20:21], v[20:21]
-; CHECK-NEXT: v_cvt_f32_f64_e64 v5, |v[22:23]|
-; CHECK-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v5, v[22:23]
+; CHECK-NEXT: v_cndmask_b32_e32 v4, v1, v0, vcc
; CHECK-NEXT: v_cvt_f64_f32_e32 v[0:1], v5
; CHECK-NEXT: v_and_b32_e32 v6, 1, v5
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[22:23]|, v[0:1]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[22:23]|, v[0:1]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6
-; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[22:23]|, |v[0:1]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[22:23], v[0:1]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v6
+; CHECK-NEXT: v_cndmask_b32_e64 v0, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v0, v5, v0
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc
-; CHECK-NEXT: v_and_or_b32 v1, v23, s4, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v1, s5
-; CHECK-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; CHECK-NEXT: v_bfe_u32 v1, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v1, v1, v0, s6
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[22:23], v[22:23]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; CHECK-NEXT: v_perm_b32 v1, v0, v4, s6
-; CHECK-NEXT: v_cvt_f32_f64_e64 v0, |v[16:17]|
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: v_perm_b32 v1, v0, v4, s7
+; CHECK-NEXT: v_cvt_f32_f64_e32 v0, v[16:17]
; CHECK-NEXT: v_cvt_f64_f32_e32 v[4:5], v0
; CHECK-NEXT: v_and_b32_e32 v6, 1, v0
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[16:17]|, v[4:5]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[16:17]|, v[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6
-; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[16:17]|, |v[4:5]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[16:17], v[4:5]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v6
+; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v4, v0, v4
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; CHECK-NEXT: v_and_or_b32 v4, v17, s4, v0
-; CHECK-NEXT: v_bfe_u32 v0, v0, 16, 1
-; CHECK-NEXT: v_add3_u32 v0, v0, v4, s5
-; CHECK-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; CHECK-NEXT: v_bfe_u32 v4, v0, 16, 1
+; CHECK-NEXT: v_add3_u32 v4, v4, v0, s6
+; CHECK-NEXT: v_or_b32_e32 v0, 0x400000, v0
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[16:17], v[16:17]
-; CHECK-NEXT: v_cvt_f32_f64_e64 v6, |v[18:19]|
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; CHECK-NEXT: v_cvt_f32_f64_e32 v6, v[18:19]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; CHECK-NEXT: v_cvt_f64_f32_e32 v[4:5], v6
; CHECK-NEXT: v_and_b32_e32 v7, 1, v6
-; CHECK-NEXT: v_cmp_gt_f64_e64 s[10:11], |v[18:19]|, v[4:5]
-; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], |v[18:19]|, v[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7
-; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[10:11]
+; CHECK-NEXT: v_cmp_gt_f64_e64 s[8:9], |v[18:19]|, |v[4:5]|
+; CHECK-NEXT: v_cmp_nlg_f64_e32 vcc, v[18:19], v[4:5]
+; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v7
+; CHECK-NEXT: v_cndmask_b32_e64 v4, -1, 1, s[8:9]
; CHECK-NEXT: v_add_u32_e32 v4, v6, v4
-; CHECK-NEXT: s_or_b64 vcc, s[8:9], vcc
+; CHECK-NEXT: s_or_b64 vcc, vcc, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
-; CHECK-NEXT: v_and_or_b32 v5, v19, s4, v4
-; CHECK-NEXT: v_bfe_u32 v4, v4, 16, 1
-; CHECK-NEXT: v_add3_u32 v4, v4, v5, s5
-; CHECK-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; CHECK-NEXT: v_bfe_u32 v5, v4, 16, 1
+; CHECK-NEXT: v_add3_u32 v5, v5, v4, s6
+; CHECK-NEXT: v_or_b32_e32 v4, 0x400000, v4
; CHECK-NEXT: v_cmp_u_f64_e32 vcc, v[18:19], v[18:19]
-; CHECK-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; CHECK-NEXT: v_perm_b32 v0, v4, v0, s6
+; CHECK-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; CHECK-NEXT: v_perm_b32 v0, v4, v0, s7
; CHECK-NEXT: global_store_dwordx4 v[32:33], v[0:3], off offset:16
; CHECK-NEXT: global_store_dwordx4 v[32:33], v[10:13], off
; CHECK-NEXT: s_waitcnt vmcnt(0)
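
For reference, here is a minimal scalar C++ sketch (illustrative only, not code from the patch) of the round-to-odd step that the expanded DAG now computes per element. It mirrors the v_cvt_f32_f64 / v_cmp_gt_f64 on absolute values / v_cmp_nlg_f64 / v_add_u32 / v_cndmask sequence in the checks above; the function name and the memcpy-based bitcasts are my own:

#include <cmath>
#include <cstdint>
#include <cstring>

// Round-to-odd narrowing of f64 to f32: narrow, widen back, and if the
// narrowing was inexact and the narrow mantissa is even, step one ulp
// toward the original value. The nlg predicate (not less and not greater)
// is true for equal *or unordered*, so NaN inputs keep the result of the
// first conversion unchanged.
static float roundToOddF64ToF32(double Wide) {
  float Narrow = static_cast<float>(Wide);           // FP_ROUND
  double NarrowAsWide = static_cast<double>(Narrow); // FP_EXTEND

  uint32_t Bits;
  std::memcpy(&Bits, &Narrow, sizeof(Bits));

  bool AlreadyOdd = (Bits & 1) != 0;
  bool ExactOrNaN = !(Wide < NarrowAsWide) && !(Wide > NarrowAsWide);

  if (!ExactOrNaN && !AlreadyOdd) {
    // In sign-magnitude encoding, Bits + 1 grows the magnitude and
    // Bits - 1 shrinks it for either sign, so comparing |Wide| with
    // |NarrowAsWide| picks the right direction without stripping and
    // restoring the sign bit, as the old expansion had to do.
    Bits += (std::fabs(Wide) > std::fabs(NarrowAsWide)) ? 1u : -1u;
    std::memcpy(&Narrow, &Bits, sizeof(Bits));
  }
  return Narrow;
}

The remaining v_bfe_u32 / v_add3_u32 / 0x400000 code in the checks is the usual f32-to-bf16 round-to-nearest-even with NaN quieting; the visible diff there is that it now operates on the signed narrow value directly, so the v_and_or_b32 instructions that reassembled the sign bit are gone.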