[llvm] [DAG] visitFREEZE - nodes that allow multiple frozen operands should directly freeze all operands (PR #160914)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 29 07:46:24 PDT 2025
https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/160914
From d4101284e1ea51e0cc7ed8a29925a0d8ee501f27 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Fri, 26 Sep 2025 16:40:16 +0100
Subject: [PATCH] [DAG] visitFREEZE - nodes that allow multiple frozen operands
should directly freeze all operands
Avoid the ReplaceAllUsesOfValueWith approach that has caused so many problems in previous attempts at this (#145939).
Minor step towards #149798 - eventually we're going to use this path for all node types, but there are a large number of regressions that need addressing first (see #152107).
This exposed a couple of things that need to be addressed here:
- we need to revisit frozen nodes once we've merged all frozen/unfrozen uses of a node.
- RISCVISD::READ_VLENB is never undef/poison
Many of the current regressions are due to us not doing more to avoid freeze(load) nodes - if the load is legal, it's no longer going to split. I'm not certain exactly when we can split nodes - #160884 tried to catch up to ValueTracking but that might still be wrong.
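For reference, the core behavioural change is in the AllowMultipleMaybePoisonOperands path: instead of singling out one maybe-poison operand and rewriting its uses via ReplaceAllUsesOfValueWith, we now just rebuild the node with every operand frozen. Excerpted (and lightly trimmed) from the DAGCombiner.cpp hunk below:

  SmallVector<SDValue> Ops;
  if (AllowMultipleMaybePoisonOperands) {
    // Collect and freeze all operands.
    Ops = SmallVector<SDValue>(N0->ops());
    for (SDValue &Op : Ops)
      Op = DAG.getFreeze(Op);
  } else {
    // Existing path: find the single maybe-poison operand, freeze it and
    // replace its other uses (unchanged logic, now nested in the else).
  }

DAG.getFreeze should fold to the original value for operands that are already known not to be undef/poison, so the extra freezes are expected to disappear where they aren't needed.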
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 135 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +
...amdgpu-codegenprepare-fold-binop-select.ll | 4 +-
llvm/test/CodeGen/AMDGPU/fptoi.i128.ll | 28 +-
llvm/test/CodeGen/AMDGPU/load-constant-i1.ll | 357 +-
llvm/test/CodeGen/AMDGPU/load-constant-i16.ll | 738 ++--
llvm/test/CodeGen/AMDGPU/load-constant-i8.ll | 1814 ++++----
llvm/test/CodeGen/AMDGPU/load-global-i16.ll | 1058 +++--
llvm/test/CodeGen/AMDGPU/load-global-i8.ll | 1499 +++----
llvm/test/CodeGen/AMDGPU/load-local-i16.ll | 30 +-
llvm/test/CodeGen/AMDGPU/pr155452.ll | 74 +-
llvm/test/CodeGen/AMDGPU/sdiv.ll | 118 +-
llvm/test/CodeGen/AMDGPU/srem.ll | 375 +-
.../test/CodeGen/AMDGPU/vector-reduce-smax.ll | 12 +-
.../test/CodeGen/AMDGPU/vector-reduce-smin.ll | 12 +-
.../test/CodeGen/AMDGPU/vector-reduce-umax.ll | 12 +-
.../test/CodeGen/AMDGPU/vector-reduce-umin.ll | 12 +-
llvm/test/CodeGen/RISCV/abds.ll | 248 +-
llvm/test/CodeGen/RISCV/abdu-neg.ll | 508 +--
llvm/test/CodeGen/RISCV/fpclamptosat.ll | 368 +-
llvm/test/CodeGen/RISCV/iabs.ll | 24 +-
.../RISCV/intrinsic-cttz-elts-vscale.ll | 21 +-
llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll | 1210 ++++--
llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll | 3820 ++++++++++-------
llvm/test/CodeGen/RISCV/rvv/vp-splice.ll | 78 +-
llvm/test/CodeGen/X86/bmi-select-distrib.ll | 36 +-
.../CodeGen/X86/insertelement-var-index.ll | 44 +-
llvm/test/CodeGen/X86/midpoint-int-vec-256.ll | 702 +--
llvm/test/CodeGen/X86/midpoint-int-vec-512.ll | 472 +-
29 files changed, 7695 insertions(+), 6116 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 77df4b4598c48..84c9abf64d1cb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16918,6 +16918,8 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
// creating a cycle in a DAG. Let's undo that by mutating the freeze.
assert(N->getOperand(0) == FrozenN0 && "Expected cycle in DAG");
DAG.UpdateNodeOperands(N, N0);
+ // Revisit the node.
+ AddToWorklist(N);
return FrozenN0;
}
@@ -16972,72 +16974,81 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
}
}
- SmallSet<SDValue, 8> MaybePoisonOperands;
- SmallVector<unsigned, 8> MaybePoisonOperandNumbers;
- for (auto [OpNo, Op] : enumerate(N0->ops())) {
- if (DAG.isGuaranteedNotToBeUndefOrPoison(Op, /*PoisonOnly=*/false))
- continue;
- bool HadMaybePoisonOperands = !MaybePoisonOperands.empty();
- bool IsNewMaybePoisonOperand = MaybePoisonOperands.insert(Op).second;
- if (IsNewMaybePoisonOperand)
- MaybePoisonOperandNumbers.push_back(OpNo);
- if (!HadMaybePoisonOperands)
- continue;
- if (IsNewMaybePoisonOperand && !AllowMultipleMaybePoisonOperands) {
- // Multiple maybe-poison ops when not allowed - bail out.
- return SDValue();
+ SmallVector<SDValue> Ops;
+ if (AllowMultipleMaybePoisonOperands) {
+ // Collect and freeze all operands.
+ Ops = SmallVector<SDValue>(N0->ops());
+ for (SDValue &Op : Ops)
+ Op = DAG.getFreeze(Op);
+ } else {
+ SmallSet<SDValue, 8> MaybePoisonOperands;
+ SmallVector<unsigned, 8> MaybePoisonOperandNumbers;
+ for (auto [OpNo, Op] : enumerate(N0->ops())) {
+ if (DAG.isGuaranteedNotToBeUndefOrPoison(Op, /*PoisonOnly=*/false))
+ continue;
+ bool HadMaybePoisonOperands = !MaybePoisonOperands.empty();
+ bool IsNewMaybePoisonOperand = MaybePoisonOperands.insert(Op).second;
+ if (IsNewMaybePoisonOperand)
+ MaybePoisonOperandNumbers.push_back(OpNo);
+ if (!HadMaybePoisonOperands)
+ continue;
+ if (IsNewMaybePoisonOperand) {
+ // Multiple maybe-poison ops when not allowed - bail out.
+ return SDValue();
+ }
+ }
+ // NOTE: the whole op may be not guaranteed to not be undef or poison
+ // because it could create undef or poison due to it's poison-generating
+ // flags. So not finding any maybe-poison operands is fine.
+
+ for (unsigned OpNo : MaybePoisonOperandNumbers) {
+ // N0 can mutate during iteration, so make sure to refetch the maybe
+ // poison operands via the operand numbers. The typical scenario is that
+ // we have something like this
+ // t262: i32 = freeze t181
+ // t150: i32 = ctlz_zero_undef t262
+ // t184: i32 = ctlz_zero_undef t181
+ // t268: i32 = select_cc t181, Constant:i32<0>, t184, t186, setne:ch
+ // When freezing the t181 operand we get t262 back, and then the
+ // ReplaceAllUsesOfValueWith call will not only replace t181 by t262, but
+ // also recursively replace t184 by t150.
+ SDValue MaybePoisonOperand = N->getOperand(0).getOperand(OpNo);
+ // Don't replace every single UNDEF everywhere with frozen UNDEF, though.
+ if (MaybePoisonOperand.isUndef())
+ continue;
+ // First, freeze each offending operand.
+ SDValue FrozenMaybePoisonOperand = DAG.getFreeze(MaybePoisonOperand);
+ // Then, change all other uses of unfrozen operand to use frozen operand.
+ DAG.ReplaceAllUsesOfValueWith(MaybePoisonOperand,
+ FrozenMaybePoisonOperand);
+ if (FrozenMaybePoisonOperand.getOpcode() == ISD::FREEZE &&
+ FrozenMaybePoisonOperand.getOperand(0) == FrozenMaybePoisonOperand) {
+ // But, that also updated the use in the freeze we just created, thus
+ // creating a cycle in a DAG. Let's undo that by mutating the freeze.
+ DAG.UpdateNodeOperands(FrozenMaybePoisonOperand.getNode(),
+ MaybePoisonOperand);
+ }
+
+ // This node has been merged with another.
+ if (N->getOpcode() == ISD::DELETED_NODE)
+ return SDValue(N, 0);
}
- }
- // NOTE: the whole op may be not guaranteed to not be undef or poison because
- // it could create undef or poison due to it's poison-generating flags.
- // So not finding any maybe-poison operands is fine.
-
- for (unsigned OpNo : MaybePoisonOperandNumbers) {
- // N0 can mutate during iteration, so make sure to refetch the maybe poison
- // operands via the operand numbers. The typical scenario is that we have
- // something like this
- // t262: i32 = freeze t181
- // t150: i32 = ctlz_zero_undef t262
- // t184: i32 = ctlz_zero_undef t181
- // t268: i32 = select_cc t181, Constant:i32<0>, t184, t186, setne:ch
- // When freezing the t181 operand we get t262 back, and then the
- // ReplaceAllUsesOfValueWith call will not only replace t181 by t262, but
- // also recursively replace t184 by t150.
- SDValue MaybePoisonOperand = N->getOperand(0).getOperand(OpNo);
- // Don't replace every single UNDEF everywhere with frozen UNDEF, though.
- if (MaybePoisonOperand.isUndef())
- continue;
- // First, freeze each offending operand.
- SDValue FrozenMaybePoisonOperand = DAG.getFreeze(MaybePoisonOperand);
- // Then, change all other uses of unfrozen operand to use frozen operand.
- DAG.ReplaceAllUsesOfValueWith(MaybePoisonOperand, FrozenMaybePoisonOperand);
- if (FrozenMaybePoisonOperand.getOpcode() == ISD::FREEZE &&
- FrozenMaybePoisonOperand.getOperand(0) == FrozenMaybePoisonOperand) {
- // But, that also updated the use in the freeze we just created, thus
- // creating a cycle in a DAG. Let's undo that by mutating the freeze.
- DAG.UpdateNodeOperands(FrozenMaybePoisonOperand.getNode(),
- MaybePoisonOperand);
- }
-
- // This node has been merged with another.
- if (N->getOpcode() == ISD::DELETED_NODE)
- return SDValue(N, 0);
- }
- assert(N->getOpcode() != ISD::DELETED_NODE && "Node was deleted!");
+ assert(N->getOpcode() != ISD::DELETED_NODE && "Node was deleted!");
- // The whole node may have been updated, so the value we were holding
- // may no longer be valid. Re-fetch the operand we're `freeze`ing.
- N0 = N->getOperand(0);
+ // The whole node may have been updated, so the value we were holding
+ // may no longer be valid. Re-fetch the operand we're `freeze`ing.
+ N0 = N->getOperand(0);
- // Finally, recreate the node, it's operands were updated to use
- // frozen operands, so we just need to use it's "original" operands.
- SmallVector<SDValue> Ops(N0->ops());
- // TODO: ISD::UNDEF and ISD::POISON should get separate handling, but best
- // leave for a future patch.
- for (SDValue &Op : Ops) {
- if (Op.isUndef())
- Op = DAG.getFreeze(Op);
+ // Finally, recreate the node, it's operands were updated to use
+ // frozen operands, so we just need to use it's "original" operands.
+ Ops = SmallVector<SDValue>(N0->ops());
+ // TODO: ISD::UNDEF and ISD::POISON should get separate handling, but best
+ // leave for a future patch.
+ for (SDValue &Op : Ops) {
+ if (Op.isUndef())
+ Op = DAG.getFreeze(Op);
+ }
}
SDLoc DL(N0);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 50649cf3caba4..0614830296f89 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -21918,6 +21918,8 @@ bool RISCVTargetLowering::canCreateUndefOrPoisonForTargetNode(
// TODO: Add more target nodes.
switch (Op.getOpcode()) {
+ case RISCVISD::READ_VLENB:
+ return false;
case RISCVISD::SLLW:
case RISCVISD::SRAW:
case RISCVISD::SRLW:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
index e71bf15384727..cd5854f48b8fa 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
@@ -129,7 +129,7 @@ define i32 @select_sdiv_lhs_opaque_const0_i32(i1 %cond) {
; GCN-NEXT: s_getpc_b64 s[4:5]
; GCN-NEXT: s_add_u32 s4, s4, gv at gotpcrel32@lo+4
; GCN-NEXT: s_addc_u32 s5, s5, gv at gotpcrel32@hi+12
-; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
@@ -211,7 +211,7 @@ define i32 @select_sdiv_lhs_opaque_const1_i32(i1 %cond) {
; GCN-NEXT: s_getpc_b64 s[4:5]
; GCN-NEXT: s_add_u32 s4, s4, gv at gotpcrel32@lo+4
; GCN-NEXT: s_addc_u32 s5, s5, gv at gotpcrel32@hi+12
-; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index e7af7467171c3..9a590e1c532b8 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -1437,25 +1437,15 @@ define i128 @fptoui_f32_to_i128(float %x) {
}
define i128 @fptosi_f16_to_i128(half %x) {
-; SDAG-LABEL: fptosi_f16_to_i128:
-; SDAG: ; %bb.0:
-; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SDAG-NEXT: v_cvt_i32_f32_e32 v0, v0
-; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; SDAG-NEXT: v_ashrrev_i32_e32 v2, 31, v1
-; SDAG-NEXT: v_mov_b32_e32 v3, v2
-; SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GISEL-LABEL: fptosi_f16_to_i128:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GISEL-NEXT: v_cvt_i32_f32_e32 v0, v0
-; GISEL-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GISEL-NEXT: v_mov_b32_e32 v2, v1
-; GISEL-NEXT: v_mov_b32_e32 v3, v1
-; GISEL-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: fptosi_f16_to_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GCN-NEXT: v_cvt_i32_f32_e32 v0, v0
+; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT: v_mov_b32_e32 v2, v1
+; GCN-NEXT: v_mov_b32_e32 v3, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cvt = fptosi half %x to i128
ret i128 %cvt
}
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
index f93e5f06beff9..57ba71ca7efa0 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
@@ -9843,53 +9843,53 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_lshr_b32 s42, s5, 30
-; GFX6-NEXT: s_lshr_b32 s36, s4, 30
-; GFX6-NEXT: s_lshr_b32 s38, s4, 31
-; GFX6-NEXT: s_lshr_b32 s30, s4, 28
-; GFX6-NEXT: s_lshr_b32 s34, s4, 29
-; GFX6-NEXT: s_lshr_b32 s26, s4, 26
-; GFX6-NEXT: s_lshr_b32 s28, s4, 27
-; GFX6-NEXT: s_lshr_b32 s22, s4, 24
-; GFX6-NEXT: s_lshr_b32 s24, s4, 25
-; GFX6-NEXT: s_lshr_b32 s18, s4, 22
-; GFX6-NEXT: s_lshr_b32 s20, s4, 23
-; GFX6-NEXT: s_lshr_b32 s14, s4, 20
-; GFX6-NEXT: s_lshr_b32 s16, s4, 21
-; GFX6-NEXT: s_lshr_b32 s10, s4, 18
-; GFX6-NEXT: s_lshr_b32 s12, s4, 19
-; GFX6-NEXT: s_lshr_b32 s6, s4, 16
-; GFX6-NEXT: s_lshr_b32 s8, s4, 17
+; GFX6-NEXT: s_lshr_b32 s36, s5, 28
+; GFX6-NEXT: s_lshr_b32 s38, s5, 29
+; GFX6-NEXT: s_lshr_b32 s30, s5, 26
+; GFX6-NEXT: s_lshr_b32 s34, s5, 27
+; GFX6-NEXT: s_lshr_b32 s26, s5, 24
+; GFX6-NEXT: s_lshr_b32 s28, s5, 25
+; GFX6-NEXT: s_lshr_b32 s22, s5, 22
+; GFX6-NEXT: s_lshr_b32 s24, s5, 23
+; GFX6-NEXT: s_lshr_b32 s18, s5, 20
+; GFX6-NEXT: s_lshr_b32 s20, s5, 21
+; GFX6-NEXT: s_lshr_b32 s14, s5, 18
+; GFX6-NEXT: s_lshr_b32 s16, s5, 19
+; GFX6-NEXT: s_lshr_b32 s10, s5, 16
+; GFX6-NEXT: s_lshr_b32 s12, s5, 17
+; GFX6-NEXT: s_lshr_b32 s6, s5, 14
+; GFX6-NEXT: s_lshr_b32 s8, s5, 15
+; GFX6-NEXT: s_mov_b32 s40, s5
; GFX6-NEXT: s_ashr_i32 s7, s5, 31
-; GFX6-NEXT: s_bfe_i64 s[44:45], s[4:5], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[44:45], s[40:41], 0x10000
; GFX6-NEXT: v_mov_b32_e32 v4, s7
-; GFX6-NEXT: s_lshr_b32 s40, s4, 14
+; GFX6-NEXT: s_lshr_b32 s40, s5, 12
; GFX6-NEXT: v_mov_b32_e32 v0, s44
; GFX6-NEXT: v_mov_b32_e32 v1, s45
-; GFX6-NEXT: s_mov_b32 s44, s5
-; GFX6-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[44:45], s[4:5], 0x10000
; GFX6-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000
; GFX6-NEXT: v_mov_b32_e32 v6, s44
; GFX6-NEXT: v_mov_b32_e32 v7, s45
-; GFX6-NEXT: s_lshr_b32 s44, s4, 15
+; GFX6-NEXT: s_lshr_b32 s44, s5, 13
; GFX6-NEXT: v_mov_b32_e32 v2, s42
; GFX6-NEXT: v_mov_b32_e32 v3, s43
-; GFX6-NEXT: s_lshr_b32 s42, s4, 12
+; GFX6-NEXT: s_lshr_b32 s42, s5, 10
; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000
; GFX6-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x10000
; GFX6-NEXT: v_mov_b32_e32 v8, s36
; GFX6-NEXT: v_mov_b32_e32 v9, s37
-; GFX6-NEXT: s_lshr_b32 s36, s4, 13
+; GFX6-NEXT: s_lshr_b32 s36, s5, 11
; GFX6-NEXT: v_mov_b32_e32 v10, s38
; GFX6-NEXT: v_mov_b32_e32 v11, s39
-; GFX6-NEXT: s_lshr_b32 s38, s4, 10
+; GFX6-NEXT: s_lshr_b32 s38, s5, 8
; GFX6-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000
; GFX6-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x10000
; GFX6-NEXT: v_mov_b32_e32 v12, s30
; GFX6-NEXT: v_mov_b32_e32 v13, s31
-; GFX6-NEXT: s_lshr_b32 s30, s4, 11
+; GFX6-NEXT: s_lshr_b32 s30, s5, 9
; GFX6-NEXT: v_mov_b32_e32 v14, s34
; GFX6-NEXT: v_mov_b32_e32 v15, s35
-; GFX6-NEXT: s_lshr_b32 s34, s4, 8
+; GFX6-NEXT: s_lshr_b32 s34, s5, 6
; GFX6-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x10000
; GFX6-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x10000
; GFX6-NEXT: v_mov_b32_e32 v5, s7
@@ -9897,191 +9897,190 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v2, s26
; GFX6-NEXT: v_mov_b32_e32 v3, s27
-; GFX6-NEXT: s_lshr_b32 s26, s4, 9
+; GFX6-NEXT: s_lshr_b32 s26, s5, 7
; GFX6-NEXT: v_mov_b32_e32 v4, s28
; GFX6-NEXT: v_mov_b32_e32 v5, s29
-; GFX6-NEXT: s_lshr_b32 s28, s4, 6
+; GFX6-NEXT: s_lshr_b32 s28, s5, 4
; GFX6-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x10000
; GFX6-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:240
+; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:480
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v8, s22
; GFX6-NEXT: v_mov_b32_e32 v9, s23
-; GFX6-NEXT: s_lshr_b32 s22, s4, 7
+; GFX6-NEXT: s_lshr_b32 s22, s5, 5
; GFX6-NEXT: v_mov_b32_e32 v10, s24
; GFX6-NEXT: v_mov_b32_e32 v11, s25
-; GFX6-NEXT: s_lshr_b32 s24, s4, 4
+; GFX6-NEXT: s_lshr_b32 s24, s5, 2
; GFX6-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x10000
; GFX6-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:224
+; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:464
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v12, s18
; GFX6-NEXT: v_mov_b32_e32 v13, s19
-; GFX6-NEXT: s_lshr_b32 s18, s4, 5
+; GFX6-NEXT: s_lshr_b32 s18, s5, 3
; GFX6-NEXT: v_mov_b32_e32 v14, s20
; GFX6-NEXT: v_mov_b32_e32 v15, s21
-; GFX6-NEXT: s_lshr_b32 s20, s4, 2
+; GFX6-NEXT: s_lshr_b32 s20, s5, 1
; GFX6-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x10000
; GFX6-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:208
+; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:448
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v2, s14
; GFX6-NEXT: v_mov_b32_e32 v3, s15
-; GFX6-NEXT: s_lshr_b32 s14, s4, 3
+; GFX6-NEXT: s_lshr_b32 s14, s4, 30
; GFX6-NEXT: v_mov_b32_e32 v4, s16
; GFX6-NEXT: v_mov_b32_e32 v5, s17
-; GFX6-NEXT: s_lshr_b32 s16, s4, 1
+; GFX6-NEXT: s_lshr_b32 s16, s4, 31
; GFX6-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000
; GFX6-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:192
+; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:432
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v8, s10
; GFX6-NEXT: v_mov_b32_e32 v9, s11
-; GFX6-NEXT: s_lshr_b32 s10, s5, 29
+; GFX6-NEXT: s_lshr_b32 s10, s4, 28
; GFX6-NEXT: v_mov_b32_e32 v10, s12
; GFX6-NEXT: v_mov_b32_e32 v11, s13
-; GFX6-NEXT: s_lshr_b32 s12, s5, 28
+; GFX6-NEXT: s_lshr_b32 s12, s4, 29
; GFX6-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x10000
; GFX6-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:176
+; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:416
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v12, s6
; GFX6-NEXT: v_mov_b32_e32 v13, s7
-; GFX6-NEXT: s_lshr_b32 s6, s5, 26
+; GFX6-NEXT: s_lshr_b32 s46, s4, 26
; GFX6-NEXT: v_mov_b32_e32 v14, s8
; GFX6-NEXT: v_mov_b32_e32 v15, s9
-; GFX6-NEXT: s_lshr_b32 s8, s5, 27
-; GFX6-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x10000
+; GFX6-NEXT: s_lshr_b32 s8, s4, 27
+; GFX6-NEXT: s_bfe_i64 s[6:7], s[44:45], 0x10000
; GFX6-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:160
+; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:400
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v2, s40
; GFX6-NEXT: v_mov_b32_e32 v3, s41
-; GFX6-NEXT: s_lshr_b32 s40, s5, 25
-; GFX6-NEXT: v_mov_b32_e32 v4, s44
-; GFX6-NEXT: v_mov_b32_e32 v5, s45
-; GFX6-NEXT: s_lshr_b32 s44, s5, 24
-; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:144
+; GFX6-NEXT: s_lshr_b32 s40, s4, 24
+; GFX6-NEXT: v_mov_b32_e32 v4, s6
+; GFX6-NEXT: v_mov_b32_e32 v5, s7
+; GFX6-NEXT: s_lshr_b32 s44, s4, 25
+; GFX6-NEXT: s_bfe_i64 s[6:7], s[36:37], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[36:37], s[42:43], 0x10000
+; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:384
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v8, s42
-; GFX6-NEXT: v_mov_b32_e32 v9, s43
-; GFX6-NEXT: s_lshr_b32 s42, s5, 22
-; GFX6-NEXT: v_mov_b32_e32 v10, s36
-; GFX6-NEXT: v_mov_b32_e32 v11, s37
-; GFX6-NEXT: s_lshr_b32 s36, s5, 23
-; GFX6-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:128
+; GFX6-NEXT: v_mov_b32_e32 v8, s36
+; GFX6-NEXT: v_mov_b32_e32 v9, s37
+; GFX6-NEXT: s_lshr_b32 s36, s4, 22
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
+; GFX6-NEXT: v_mov_b32_e32 v11, s7
+; GFX6-NEXT: s_lshr_b32 s42, s4, 23
+; GFX6-NEXT: s_bfe_i64 s[6:7], s[30:31], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[30:31], s[38:39], 0x10000
+; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:368
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v12, s38
-; GFX6-NEXT: v_mov_b32_e32 v13, s39
-; GFX6-NEXT: s_lshr_b32 s38, s5, 20
-; GFX6-NEXT: v_mov_b32_e32 v14, s30
-; GFX6-NEXT: v_mov_b32_e32 v15, s31
-; GFX6-NEXT: s_lshr_b32 s4, s5, 21
+; GFX6-NEXT: v_mov_b32_e32 v12, s30
+; GFX6-NEXT: v_mov_b32_e32 v13, s31
+; GFX6-NEXT: s_lshr_b32 s30, s4, 20
+; GFX6-NEXT: v_mov_b32_e32 v14, s6
+; GFX6-NEXT: v_mov_b32_e32 v15, s7
+; GFX6-NEXT: s_lshr_b32 s6, s4, 21
; GFX6-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[30:31], s[34:35], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:112
-; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v2, s30
-; GFX6-NEXT: v_mov_b32_e32 v3, s31
-; GFX6-NEXT: s_lshr_b32 s30, s5, 18
-; GFX6-NEXT: v_mov_b32_e32 v4, s26
-; GFX6-NEXT: v_mov_b32_e32 v5, s27
-; GFX6-NEXT: s_lshr_b32 s26, s5, 19
+; GFX6-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x10000
+; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:352
+; GFX6-NEXT: v_mov_b32_e32 v16, s34
+; GFX6-NEXT: v_mov_b32_e32 v17, s35
+; GFX6-NEXT: s_lshr_b32 s34, s4, 18
+; GFX6-NEXT: v_mov_b32_e32 v18, s26
+; GFX6-NEXT: v_mov_b32_e32 v19, s27
+; GFX6-NEXT: s_lshr_b32 s26, s4, 19
; GFX6-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x10000
; GFX6-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:96
+; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:336
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v8, s28
; GFX6-NEXT: v_mov_b32_e32 v9, s29
-; GFX6-NEXT: s_lshr_b32 s28, s5, 17
+; GFX6-NEXT: s_lshr_b32 s28, s4, 16
; GFX6-NEXT: v_mov_b32_e32 v10, s22
; GFX6-NEXT: v_mov_b32_e32 v11, s23
-; GFX6-NEXT: s_lshr_b32 s22, s5, 16
-; GFX6-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x10000
+; GFX6-NEXT: s_lshr_b32 s22, s4, 17
; GFX6-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:80
+; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:320
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v12, s24
; GFX6-NEXT: v_mov_b32_e32 v13, s25
-; GFX6-NEXT: s_lshr_b32 s24, s5, 14
+; GFX6-NEXT: s_lshr_b32 s24, s4, 14
+; GFX6-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x10000
; GFX6-NEXT: v_mov_b32_e32 v14, s18
; GFX6-NEXT: v_mov_b32_e32 v15, s19
-; GFX6-NEXT: s_lshr_b32 s18, s5, 15
-; GFX6-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:64
-; GFX6-NEXT: v_mov_b32_e32 v16, s20
-; GFX6-NEXT: v_mov_b32_e32 v17, s21
-; GFX6-NEXT: s_lshr_b32 s20, s5, 12
-; GFX6-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x10000
+; GFX6-NEXT: s_lshr_b32 s18, s4, 15
+; GFX6-NEXT: v_mov_b32_e32 v2, s20
+; GFX6-NEXT: v_mov_b32_e32 v3, s21
+; GFX6-NEXT: s_lshr_b32 s20, s4, 12
; GFX6-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x10000
-; GFX6-NEXT: v_mov_b32_e32 v18, s14
-; GFX6-NEXT: v_mov_b32_e32 v19, s15
-; GFX6-NEXT: s_lshr_b32 s14, s5, 13
+; GFX6-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x10000
+; GFX6-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:304
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v2, s16
-; GFX6-NEXT: v_mov_b32_e32 v3, s17
-; GFX6-NEXT: s_lshr_b32 s16, s5, 10
+; GFX6-NEXT: v_mov_b32_e32 v16, s14
+; GFX6-NEXT: v_mov_b32_e32 v17, s15
+; GFX6-NEXT: s_lshr_b32 s14, s4, 13
+; GFX6-NEXT: v_mov_b32_e32 v18, s16
+; GFX6-NEXT: v_mov_b32_e32 v19, s17
+; GFX6-NEXT: s_lshr_b32 s16, s4, 10
; GFX6-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000
; GFX6-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:48
+; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:288
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v8, s12
-; GFX6-NEXT: v_mov_b32_e32 v9, s13
-; GFX6-NEXT: s_lshr_b32 s12, s5, 11
-; GFX6-NEXT: v_mov_b32_e32 v10, s10
-; GFX6-NEXT: v_mov_b32_e32 v11, s11
-; GFX6-NEXT: s_lshr_b32 s10, s5, 8
+; GFX6-NEXT: v_mov_b32_e32 v8, s10
+; GFX6-NEXT: v_mov_b32_e32 v9, s11
+; GFX6-NEXT: s_lshr_b32 s10, s4, 11
+; GFX6-NEXT: v_mov_b32_e32 v10, s12
+; GFX6-NEXT: v_mov_b32_e32 v11, s13
+; GFX6-NEXT: s_lshr_b32 s12, s4, 8
; GFX6-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32
+; GFX6-NEXT: s_bfe_i64 s[38:39], s[46:47], 0x10000
+; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:272
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v12, s6
-; GFX6-NEXT: v_mov_b32_e32 v13, s7
-; GFX6-NEXT: s_lshr_b32 s6, s5, 9
+; GFX6-NEXT: v_mov_b32_e32 v12, s38
+; GFX6-NEXT: v_mov_b32_e32 v13, s39
+; GFX6-NEXT: s_lshr_b32 s38, s4, 9
; GFX6-NEXT: v_mov_b32_e32 v14, s8
; GFX6-NEXT: v_mov_b32_e32 v15, s9
-; GFX6-NEXT: s_lshr_b32 s8, s5, 6
-; GFX6-NEXT: s_bfe_i64 s[34:35], s[44:45], 0x10000
+; GFX6-NEXT: s_lshr_b32 s8, s4, 6
+; GFX6-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x10000
; GFX6-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:16
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:256
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v16, s34
-; GFX6-NEXT: v_mov_b32_e32 v17, s35
-; GFX6-NEXT: s_lshr_b32 s34, s5, 7
-; GFX6-NEXT: v_mov_b32_e32 v18, s40
-; GFX6-NEXT: v_mov_b32_e32 v19, s41
-; GFX6-NEXT: s_lshr_b32 s40, s5, 4
-; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000
+; GFX6-NEXT: v_mov_b32_e32 v0, s40
+; GFX6-NEXT: v_mov_b32_e32 v1, s41
+; GFX6-NEXT: s_lshr_b32 s40, s4, 7
+; GFX6-NEXT: v_mov_b32_e32 v2, s44
+; GFX6-NEXT: v_mov_b32_e32 v3, s45
+; GFX6-NEXT: s_lshr_b32 s44, s4, 4
; GFX6-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000
+; GFX6-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:240
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, s42
-; GFX6-NEXT: v_mov_b32_e32 v1, s43
-; GFX6-NEXT: s_lshr_b32 s42, s5, 5
-; GFX6-NEXT: v_mov_b32_e32 v2, s36
-; GFX6-NEXT: v_mov_b32_e32 v3, s37
-; GFX6-NEXT: s_lshr_b32 s36, s5, 2
-; GFX6-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:480
+; GFX6-NEXT: v_mov_b32_e32 v16, s36
+; GFX6-NEXT: v_mov_b32_e32 v17, s37
+; GFX6-NEXT: s_lshr_b32 s36, s4, 5
+; GFX6-NEXT: v_mov_b32_e32 v18, s42
+; GFX6-NEXT: v_mov_b32_e32 v19, s43
+; GFX6-NEXT: s_lshr_b32 s42, s4, 2
+; GFX6-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000
+; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:224
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v8, s38
-; GFX6-NEXT: v_mov_b32_e32 v9, s39
-; GFX6-NEXT: s_lshr_b32 s38, s5, 3
-; GFX6-NEXT: s_lshr_b32 s44, s5, 1
-; GFX6-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000
+; GFX6-NEXT: v_mov_b32_e32 v8, s30
+; GFX6-NEXT: v_mov_b32_e32 v9, s31
+; GFX6-NEXT: s_lshr_b32 s30, s4, 3
+; GFX6-NEXT: s_lshr_b32 s4, s4, 1
+; GFX6-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000
; GFX6-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x10000
; GFX6-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x10000
; GFX6-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x10000
; GFX6-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000
; GFX6-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x10000
; GFX6-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x10000
; GFX6-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x10000
@@ -10090,71 +10089,71 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o
; GFX6-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x10000
; GFX6-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x10000
; GFX6-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x10000
-; GFX6-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x10000
-; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:464
-; GFX6-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:448
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:432
-; GFX6-NEXT: v_mov_b32_e32 v10, s4
-; GFX6-NEXT: v_mov_b32_e32 v11, s5
-; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:416
-; GFX6-NEXT: s_waitcnt expcnt(1)
-; GFX6-NEXT: v_mov_b32_e32 v0, s30
-; GFX6-NEXT: v_mov_b32_e32 v1, s31
+; GFX6-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x10000
+; GFX6-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x10000
+; GFX6-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:208
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192
+; GFX6-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:176
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
+; GFX6-NEXT: v_mov_b32_e32 v11, s7
+; GFX6-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:160
+; GFX6-NEXT: s_waitcnt expcnt(2)
+; GFX6-NEXT: v_mov_b32_e32 v0, s34
+; GFX6-NEXT: v_mov_b32_e32 v1, s35
; GFX6-NEXT: v_mov_b32_e32 v2, s26
; GFX6-NEXT: v_mov_b32_e32 v3, s27
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:400
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, s22
-; GFX6-NEXT: v_mov_b32_e32 v1, s23
-; GFX6-NEXT: v_mov_b32_e32 v2, s28
-; GFX6-NEXT: v_mov_b32_e32 v3, s29
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:384
+; GFX6-NEXT: v_mov_b32_e32 v0, s28
+; GFX6-NEXT: v_mov_b32_e32 v1, s29
+; GFX6-NEXT: v_mov_b32_e32 v2, s22
+; GFX6-NEXT: v_mov_b32_e32 v3, s23
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:128
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, s24
; GFX6-NEXT: v_mov_b32_e32 v1, s25
; GFX6-NEXT: v_mov_b32_e32 v2, s18
; GFX6-NEXT: v_mov_b32_e32 v3, s19
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:368
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, s20
; GFX6-NEXT: v_mov_b32_e32 v1, s21
; GFX6-NEXT: v_mov_b32_e32 v2, s14
; GFX6-NEXT: v_mov_b32_e32 v3, s15
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:352
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, s16
; GFX6-NEXT: v_mov_b32_e32 v1, s17
-; GFX6-NEXT: v_mov_b32_e32 v2, s12
-; GFX6-NEXT: v_mov_b32_e32 v3, s13
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:336
+; GFX6-NEXT: v_mov_b32_e32 v2, s10
+; GFX6-NEXT: v_mov_b32_e32 v3, s11
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, s10
-; GFX6-NEXT: v_mov_b32_e32 v1, s11
-; GFX6-NEXT: v_mov_b32_e32 v2, s6
-; GFX6-NEXT: v_mov_b32_e32 v3, s7
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:320
+; GFX6-NEXT: v_mov_b32_e32 v0, s12
+; GFX6-NEXT: v_mov_b32_e32 v1, s13
+; GFX6-NEXT: v_mov_b32_e32 v2, s38
+; GFX6-NEXT: v_mov_b32_e32 v3, s39
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, s8
; GFX6-NEXT: v_mov_b32_e32 v1, s9
-; GFX6-NEXT: v_mov_b32_e32 v2, s34
-; GFX6-NEXT: v_mov_b32_e32 v3, s35
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:304
+; GFX6-NEXT: v_mov_b32_e32 v2, s40
+; GFX6-NEXT: v_mov_b32_e32 v3, s41
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, s40
-; GFX6-NEXT: v_mov_b32_e32 v1, s41
-; GFX6-NEXT: v_mov_b32_e32 v2, s42
-; GFX6-NEXT: v_mov_b32_e32 v3, s43
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:288
+; GFX6-NEXT: v_mov_b32_e32 v0, s44
+; GFX6-NEXT: v_mov_b32_e32 v1, s45
+; GFX6-NEXT: v_mov_b32_e32 v2, s36
+; GFX6-NEXT: v_mov_b32_e32 v3, s37
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, s36
-; GFX6-NEXT: v_mov_b32_e32 v1, s37
-; GFX6-NEXT: v_mov_b32_e32 v2, s38
-; GFX6-NEXT: v_mov_b32_e32 v3, s39
-; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:272
-; GFX6-NEXT: v_mov_b32_e32 v8, s44
-; GFX6-NEXT: v_mov_b32_e32 v9, s45
-; GFX6-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:256
+; GFX6-NEXT: v_mov_b32_e32 v0, s42
+; GFX6-NEXT: v_mov_b32_e32 v1, s43
+; GFX6-NEXT: v_mov_b32_e32 v2, s30
+; GFX6-NEXT: v_mov_b32_e32 v3, s31
+; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GFX6-NEXT: v_mov_b32_e32 v8, s4
+; GFX6-NEXT: v_mov_b32_e32 v9, s5
+; GFX6-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: constant_sextload_v64i1_to_v64i64:
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
index a135b43bad0fe..f7396eb1e1159 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
@@ -6545,33 +6545,33 @@ define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(ptr addrspace(1) %ou
; GCN-NOHSA-SI-NEXT: s_mov_b32 s2, -1
; GCN-NOHSA-SI-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NOHSA-SI-NEXT: s_mov_b32 s8, s7
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s6, 16
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s12, s5
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s10, s5
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s6, 16
; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s4, 16
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[4:5], 0x100000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[6:7], 0x100000
; GCN-NOHSA-SI-NEXT: s_ashr_i32 s13, s5, 31
; GCN-NOHSA-SI-NEXT: s_ashr_i32 s15, s5, 16
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[12:13], 0x100000
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s12, s7, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s20, s7, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[10:11], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s20, s7, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s21, s7, 16
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[8:9], 0x100000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[14:15], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[12:13], 0x100000
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s6
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s7
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s20
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s12
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s21
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s20
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s15
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s13
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s18
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s19
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s4
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s5
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s15
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s13
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s16
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s17
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s10
@@ -6592,8 +6592,8 @@ define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(ptr addrspace(1) %ou
; GCN-HSA-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x0
; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0)
; GCN-HSA-NEXT: s_mov_b32 s2, s7
-; GCN-HSA-NEXT: s_lshr_b32 s8, s6, 16
-; GCN-HSA-NEXT: s_mov_b32 s10, s5
+; GCN-HSA-NEXT: s_mov_b32 s8, s5
+; GCN-HSA-NEXT: s_lshr_b32 s10, s6, 16
; GCN-HSA-NEXT: s_lshr_b32 s12, s4, 16
; GCN-HSA-NEXT: s_ashr_i32 s13, s5, 16
; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[4:5], 0x100000
@@ -6611,25 +6611,25 @@ define amdgpu_kernel void @constant_sextload_v8i16_to_v8i64(ptr addrspace(1) %ou
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 32
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 16
; GCN-HSA-NEXT: v_mov_b32_e32 v2, s7
; GCN-HSA-NEXT: v_mov_b32_e32 v3, s12
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 16
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s16
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s17
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s8
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s9
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 32
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s8
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s9
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s13
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s6
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s10
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s11
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s13
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s6
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s16
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s17
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1
@@ -7167,12 +7167,12 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
; GCN-NOHSA-SI-NEXT: s_mov_b32 s10, -1
; GCN-NOHSA-SI-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NOHSA-SI-NEXT: s_mov_b32 s12, s7
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s6, 16
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s16, s5
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s18, s4, 16
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s20, s3
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s2, 16
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s24, s1
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s14, s5
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s16, s3
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s18, s1
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s6, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s4, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s2, 16
; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s0, 16
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[0:1], 0x100000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[2:3], 0x100000
@@ -7180,60 +7180,60 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[6:7], 0x100000
; GCN-NOHSA-SI-NEXT: s_ashr_i32 s21, s1, 31
; GCN-NOHSA-SI-NEXT: s_ashr_i32 s23, s1, 16
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[0:1], s[24:25], 0x100000
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s24, s3, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s25, s3, 16
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[2:3], s[20:21], 0x100000
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s20, s5, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s27, s5, 16
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[16:17], 0x100000
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s7, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s38, s7, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[0:1], s[18:19], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s25, s3, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s27, s3, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[2:3], s[16:17], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s5, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s38, s5, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[14:15], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s39, s7, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s40, s7, 16
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[12:13], 0x100000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[26:27], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[24:25], 0x100000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[22:23], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[20:21], 0x100000
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s6
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s7
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s40
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s39
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:112
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s5
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s38
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s33
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:112
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:80
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s3
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s27
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s25
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:48
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s23
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s21
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:16
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s36
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s37
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s4
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s5
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s27
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s20
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[8:11], 0 offset:80
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s34
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s35
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s2
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s3
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s25
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s24
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[8:11], 0 offset:48
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s30
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s31
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s0
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s1
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s23
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s21
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[8:11], 0 offset:16
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s28
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s29
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s14
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s15
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s18
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s19
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:96
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s18
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s19
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s17
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:64
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s16
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s17
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s14
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s15
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[8:11], 0 offset:32
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s12
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s13
@@ -7249,19 +7249,19 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0)
; GCN-HSA-NEXT: s_load_dwordx8 s[0:7], s[10:11], 0x0
; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-HSA-NEXT: s_mov_b32 s10, s7
-; GCN-HSA-NEXT: s_lshr_b32 s12, s6, 16
+; GCN-HSA-NEXT: s_mov_b32 s12, s7
; GCN-HSA-NEXT: s_mov_b32 s14, s5
-; GCN-HSA-NEXT: s_lshr_b32 s16, s4, 16
-; GCN-HSA-NEXT: s_ashr_i32 s25, s1, 31
+; GCN-HSA-NEXT: s_mov_b32 s16, s3
+; GCN-HSA-NEXT: s_mov_b32 s18, s1
+; GCN-HSA-NEXT: s_ashr_i32 s27, s1, 31
; GCN-HSA-NEXT: s_ashr_i32 s29, s3, 31
; GCN-HSA-NEXT: s_ashr_i32 s30, s3, 16
-; GCN-HSA-NEXT: s_mov_b32 s18, s3
-; GCN-HSA-NEXT: s_lshr_b32 s20, s2, 16
-; GCN-HSA-NEXT: s_mov_b32 s22, s1
-; GCN-HSA-NEXT: s_lshr_b32 s24, s0, 16
-; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[2:3], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[10:11], 0x100000
+; GCN-HSA-NEXT: s_lshr_b32 s20, s6, 16
+; GCN-HSA-NEXT: s_lshr_b32 s22, s4, 16
+; GCN-HSA-NEXT: s_lshr_b32 s24, s2, 16
+; GCN-HSA-NEXT: s_lshr_b32 s26, s0, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[2:3], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[12:13], 0x100000
; GCN-HSA-NEXT: s_ashr_i32 s28, s1, 16
; GCN-HSA-NEXT: s_ashr_i32 s31, s5, 31
; GCN-HSA-NEXT: s_ashr_i32 s33, s5, 16
@@ -7272,36 +7272,55 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x100000
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[24:25], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[22:23], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[26:27], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[24:25], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x100000
-; GCN-HSA-NEXT: s_add_u32 s22, s8, 0x70
-; GCN-HSA-NEXT: s_addc_u32 s23, s9, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GCN-HSA-NEXT: s_add_u32 s6, s8, 0x60
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7
-; GCN-HSA-NEXT: s_addc_u32 s7, s9, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, s22
-; GCN-HSA-NEXT: v_mov_b32_e32 v11, s7
-; GCN-HSA-NEXT: v_mov_b32_e32 v9, s23
-; GCN-HSA-NEXT: v_mov_b32_e32 v10, s6
+; GCN-HSA-NEXT: s_add_u32 s24, s8, 0x70
+; GCN-HSA-NEXT: s_addc_u32 s25, s9, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s14
+; GCN-HSA-NEXT: s_add_u32 s14, s8, 0x50
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s24
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s15
+; GCN-HSA-NEXT: s_addc_u32 s15, s9, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s14
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s25
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s15
; GCN-HSA-NEXT: v_mov_b32_e32 v2, s35
; GCN-HSA-NEXT: v_mov_b32_e32 v3, s34
-; GCN-HSA-NEXT: s_add_u32 s6, s8, 0x50
-; GCN-HSA-NEXT: v_mov_b32_e32 v6, s12
-; GCN-HSA-NEXT: v_mov_b32_e32 v7, s13
+; GCN-HSA-NEXT: s_add_u32 s14, s8, 48
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s33
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s31
; GCN-HSA-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
+; GCN-HSA-NEXT: s_addc_u32 s15, s9, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s14
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s16
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s17
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s30
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s29
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s15
+; GCN-HSA-NEXT: s_add_u32 s14, s8, 16
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT: s_addc_u32 s15, s9, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s14
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s18
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s19
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s28
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s27
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s15
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT: s_nop 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s6
+; GCN-HSA-NEXT: s_add_u32 s6, s8, 0x60
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s7
; GCN-HSA-NEXT: s_addc_u32 s7, s9, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s14
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s15
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s33
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s31
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s20
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s21
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_nop 0
@@ -7310,35 +7329,17 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5
; GCN-HSA-NEXT: s_addc_u32 s5, s9, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s16
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s17
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5
-; GCN-HSA-NEXT: s_add_u32 s4, s8, 48
-; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT: s_addc_u32 s5, s9, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s18
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s19
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s30
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s29
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s22
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s23
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5
; GCN-HSA-NEXT: s_add_u32 s4, s8, 32
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_addc_u32 s5, s9, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s26
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s27
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s20
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s21
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5
-; GCN-HSA-NEXT: s_add_u32 s4, s8, 16
-; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT: s_addc_u32 s5, s9, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s10
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s11
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s28
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s25
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s12
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s13
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8
@@ -8312,151 +8313,148 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) %
; GCN-NOHSA-SI-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NOHSA-SI-NEXT: s_mov_b32 s18, s15
; GCN-NOHSA-SI-NEXT: s_mov_b32 s20, s13
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s22, s11
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s24, s9
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s23, s1, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s25, s1, 16
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s27, s3, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s29, s3, 16
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s31, s5, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s5, 16
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s35, s7, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s37, s7, 16
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s39, s9, 31
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s24, s11
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s26, s9
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s22, s7
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s1, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s39, s1, 16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s41, s3, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s43, s3, 16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s23, s5, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s27, s5, 16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s29, s7, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s31, s7, 16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s35, s9, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s37, s9, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[46:47], s[26:27], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s55, s11, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s57, s11, 16
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[48:49], s[24:25], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[50:51], s[22:23], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[52:53], s[20:21], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[54:55], s[18:19], 0x100000
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s43, s9, 16
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s45, s11, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s47, s11, 16
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s41, s13, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s56, s13, 16
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s57, s15, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s58, s15, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s38, s14, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s12, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s10, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s8, 16
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s46, s7
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s6, 16
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s44, s5
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s4, 16
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s42, s3
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s34, s2, 16
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s40, s1
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s36, s0, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[50:51], s[20:21], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[52:53], s[18:19], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s58, s13, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s59, s13, 16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s60, s15, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s61, s15, 16
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s54, s5
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s44, s3
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s56, s1
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s14, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s36, s12, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s34, s10, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s8, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s6, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s38, s4, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s40, s2, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s42, s0, 16
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[0:1], 0x100000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[20:21], s[2:3], 0x100000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x100000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[2:3], s[12:13], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[14:15], 0x100000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[14:15], 0x100000
; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s16
; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s17
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s54
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s55
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s12
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s13
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s52
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s53
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s2
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s3
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s50
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s51
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s48
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s49
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s58
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s57
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s56
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s41
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s52
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s53
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s50
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s51
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s48
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s49
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s46
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s47
; GCN-NOHSA-SI-NEXT: s_mov_b32 s3, 0xf000
; GCN-NOHSA-SI-NEXT: s_mov_b32 s2, -1
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[46:47], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[44:45], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[42:43], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x100000
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s47
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s45
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v20, s43
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v21, s39
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:240
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s12
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s13
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s37
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s35
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:208
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[22:23], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[54:55], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[46:47], s[56:57], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x100000
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s14
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s15
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s61
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s60
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s59
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s58
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s57
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s55
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s37
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s35
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s31
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s29
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v20, s16
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v21, s17
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v22, s27
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:208
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v23, s23
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[42:43], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[40:41], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[22:23], s[38:39], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x100000
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:176
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:144
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:112
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(5)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s44
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s45
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s43
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s41
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s14
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:176
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s15
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s33
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s31
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:144
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(1)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s16
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s17
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s29
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:80
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[38:39], 0x100000
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s27
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(2)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s40
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s41
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s25
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s23
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s46
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s47
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s39
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s33
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s24
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s25
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s12
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s13
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s10
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s11
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s8
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s9
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[36:37], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[34:35], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[30:31], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[28:29], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x100000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x100000
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s6
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s7
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v20, s4
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v21, s5
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s12
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s13
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v24, s20
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v25, s21
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s30
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s31
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s20
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s21
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s24
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s25
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s18
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s19
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s36
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s37
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:192
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s18
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s19
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s22
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s23
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s34
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s35
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:160
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s26
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s27
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s28
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s29
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:128
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s16
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s17
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s26
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s27
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:96
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v22, s14
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v23, s15
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v22, s22
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v23, s23
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s10
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s11
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s8
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s9
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v26, s16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v27, s17
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s14
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s15
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GCN-NOHSA-SI-NEXT: s_endpgm
;
; GCN-HSA-LABEL: constant_sextload_v32i16_to_v32i64:
@@ -8468,47 +8466,47 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) %
; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0)
; GCN-HSA-NEXT: s_load_dwordx16 s[0:15], s[18:19], 0x0
; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-HSA-NEXT: s_mov_b32 s24, s15
-; GCN-HSA-NEXT: s_ashr_i32 s37, s3, 31
-; GCN-HSA-NEXT: s_ashr_i32 s38, s3, 16
-; GCN-HSA-NEXT: s_ashr_i32 s57, s11, 16
-; GCN-HSA-NEXT: s_ashr_i32 s59, s13, 31
-; GCN-HSA-NEXT: s_ashr_i32 s61, s13, 16
-; GCN-HSA-NEXT: s_ashr_i32 s63, s15, 31
-; GCN-HSA-NEXT: s_ashr_i32 s65, s15, 16
-; GCN-HSA-NEXT: s_lshr_b32 s46, s14, 16
-; GCN-HSA-NEXT: s_mov_b32 s48, s13
-; GCN-HSA-NEXT: s_lshr_b32 s50, s12, 16
-; GCN-HSA-NEXT: s_mov_b32 s52, s11
-; GCN-HSA-NEXT: s_lshr_b32 s34, s10, 16
-; GCN-HSA-NEXT: s_mov_b32 s30, s9
-; GCN-HSA-NEXT: s_lshr_b32 s28, s8, 16
-; GCN-HSA-NEXT: s_mov_b32 s54, s7
-; GCN-HSA-NEXT: s_lshr_b32 s56, s6, 16
-; GCN-HSA-NEXT: s_mov_b32 s58, s5
-; GCN-HSA-NEXT: s_lshr_b32 s60, s4, 16
-; GCN-HSA-NEXT: s_mov_b32 s62, s3
-; GCN-HSA-NEXT: s_lshr_b32 s64, s2, 16
-; GCN-HSA-NEXT: s_mov_b32 s66, s1
+; GCN-HSA-NEXT: s_mov_b32 s34, s15
+; GCN-HSA-NEXT: s_ashr_i32 s41, s3, 31
+; GCN-HSA-NEXT: s_ashr_i32 s42, s3, 16
+; GCN-HSA-NEXT: s_ashr_i32 s57, s5, 16
+; GCN-HSA-NEXT: s_ashr_i32 s59, s7, 31
+; GCN-HSA-NEXT: s_ashr_i32 s61, s7, 16
+; GCN-HSA-NEXT: s_ashr_i32 s63, s9, 31
+; GCN-HSA-NEXT: s_ashr_i32 s65, s9, 16
+; GCN-HSA-NEXT: s_ashr_i32 s67, s11, 31
+; GCN-HSA-NEXT: s_ashr_i32 s69, s11, 16
+; GCN-HSA-NEXT: s_mov_b32 s44, s13
+; GCN-HSA-NEXT: s_mov_b32 s46, s11
+; GCN-HSA-NEXT: s_mov_b32 s48, s9
+; GCN-HSA-NEXT: s_mov_b32 s50, s7
+; GCN-HSA-NEXT: s_mov_b32 s52, s5
+; GCN-HSA-NEXT: s_mov_b32 s38, s3
+; GCN-HSA-NEXT: s_mov_b32 s36, s1
+; GCN-HSA-NEXT: s_lshr_b32 s54, s14, 16
+; GCN-HSA-NEXT: s_lshr_b32 s56, s12, 16
+; GCN-HSA-NEXT: s_lshr_b32 s58, s10, 16
+; GCN-HSA-NEXT: s_lshr_b32 s60, s8, 16
+; GCN-HSA-NEXT: s_lshr_b32 s62, s6, 16
+; GCN-HSA-NEXT: s_lshr_b32 s64, s4, 16
+; GCN-HSA-NEXT: s_lshr_b32 s66, s2, 16
; GCN-HSA-NEXT: s_lshr_b32 s68, s0, 16
; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[2:3], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[24:25], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[34:35], 0x100000
; GCN-HSA-NEXT: s_ashr_i32 s33, s1, 31
-; GCN-HSA-NEXT: s_ashr_i32 s36, s1, 16
-; GCN-HSA-NEXT: s_ashr_i32 s39, s5, 31
-; GCN-HSA-NEXT: s_ashr_i32 s40, s5, 16
-; GCN-HSA-NEXT: s_ashr_i32 s41, s7, 31
-; GCN-HSA-NEXT: s_ashr_i32 s42, s7, 16
-; GCN-HSA-NEXT: s_ashr_i32 s43, s9, 31
-; GCN-HSA-NEXT: s_ashr_i32 s44, s9, 16
-; GCN-HSA-NEXT: s_ashr_i32 s45, s11, 31
+; GCN-HSA-NEXT: s_ashr_i32 s40, s1, 16
+; GCN-HSA-NEXT: s_ashr_i32 s43, s5, 31
+; GCN-HSA-NEXT: s_ashr_i32 s70, s13, 31
+; GCN-HSA-NEXT: s_ashr_i32 s71, s13, 16
+; GCN-HSA-NEXT: s_ashr_i32 s72, s15, 31
+; GCN-HSA-NEXT: s_ashr_i32 s73, s15, 16
; GCN-HSA-NEXT: s_bfe_i64 s[0:1], s[0:1], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[4:5], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[6:7], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[8:9], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[70:71], s[10:11], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[72:73], s[12:13], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[74:75], s[14:15], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[8:9], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[10:11], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[28:29], s[12:13], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[30:31], s[14:15], 0x100000
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[68:69], 0x100000
@@ -8518,149 +8516,149 @@ define amdgpu_kernel void @constant_sextload_v32i16_to_v32i64(ptr addrspace(1) %
; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[60:61], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[58:59], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[56:57], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[54:55], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x100000
-; GCN-HSA-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[34:35], s[54:55], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x100000
; GCN-HSA-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x100000
; GCN-HSA-NEXT: s_add_u32 s54, s16, 0xf0
; GCN-HSA-NEXT: s_addc_u32 s55, s17, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v6, s46
-; GCN-HSA-NEXT: s_add_u32 s46, s16, 0xe0
-; GCN-HSA-NEXT: v_mov_b32_e32 v7, s47
-; GCN-HSA-NEXT: s_addc_u32 s47, s17, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v24, s46
-; GCN-HSA-NEXT: v_mov_b32_e32 v25, s47
-; GCN-HSA-NEXT: s_add_u32 s46, s16, 0xd0
-; GCN-HSA-NEXT: s_addc_u32 s47, s17, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v26, s46
-; GCN-HSA-NEXT: v_mov_b32_e32 v27, s47
-; GCN-HSA-NEXT: s_add_u32 s46, s16, 0xc0
-; GCN-HSA-NEXT: s_addc_u32 s47, s17, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v28, s46
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s44
+; GCN-HSA-NEXT: s_add_u32 s44, s16, 0xd0
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s45
+; GCN-HSA-NEXT: s_addc_u32 s45, s17, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v23, s44
+; GCN-HSA-NEXT: v_mov_b32_e32 v24, s45
+; GCN-HSA-NEXT: s_add_u32 s44, s16, 0xb0
+; GCN-HSA-NEXT: s_addc_u32 s45, s17, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v25, s44
+; GCN-HSA-NEXT: v_mov_b32_e32 v26, s45
+; GCN-HSA-NEXT: s_add_u32 s44, s16, 0x90
+; GCN-HSA-NEXT: s_addc_u32 s45, s17, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v27, s44
; GCN-HSA-NEXT: v_mov_b32_e32 v18, s54
-; GCN-HSA-NEXT: v_mov_b32_e32 v29, s47
-; GCN-HSA-NEXT: s_add_u32 s46, s16, 0xb0
+; GCN-HSA-NEXT: v_mov_b32_e32 v28, s45
+; GCN-HSA-NEXT: s_add_u32 s44, s16, 0x70
; GCN-HSA-NEXT: v_mov_b32_e32 v19, s55
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s65
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s63
-; GCN-HSA-NEXT: s_addc_u32 s47, s17, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s73
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s72
+; GCN-HSA-NEXT: s_addc_u32 s45, s17, 0
; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[0:3]
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s74
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s30
-; GCN-HSA-NEXT: s_add_u32 s30, s16, 0xa0
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s75
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s31
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s71
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s38
+; GCN-HSA-NEXT: s_add_u32 s38, s16, 0x50
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s70
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s39
+; GCN-HSA-NEXT: s_addc_u32 s39, s17, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s46
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s47
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s69
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s67
+; GCN-HSA-NEXT: flat_store_dwordx4 v[23:24], v[4:7]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[25:26], v[8:11]
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s36
+; GCN-HSA-NEXT: s_add_u32 s36, s16, 48
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s37
+; GCN-HSA-NEXT: s_addc_u32 s37, s17, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v24, s36
+; GCN-HSA-NEXT: v_mov_b32_e32 v25, s37
+; GCN-HSA-NEXT: s_add_u32 s36, s16, 16
+; GCN-HSA-NEXT: s_addc_u32 s37, s17, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s30
+; GCN-HSA-NEXT: s_add_u32 s30, s16, 0xe0
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s48
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s49
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s65
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s63
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s31
; GCN-HSA-NEXT: s_addc_u32 s31, s17, 0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[4:7]
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, s48
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s26
-; GCN-HSA-NEXT: s_add_u32 s26, s16, 0x90
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s27
-; GCN-HSA-NEXT: s_addc_u32 s27, s17, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v24, s26
-; GCN-HSA-NEXT: v_mov_b32_e32 v25, s27
-; GCN-HSA-NEXT: s_add_u32 s26, s16, 0x80
-; GCN-HSA-NEXT: v_mov_b32_e32 v9, s49
-; GCN-HSA-NEXT: v_mov_b32_e32 v10, s61
-; GCN-HSA-NEXT: v_mov_b32_e32 v11, s59
-; GCN-HSA-NEXT: s_addc_u32 s27, s17, 0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[8:11]
-; GCN-HSA-NEXT: v_mov_b32_e32 v12, s72
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, s24
-; GCN-HSA-NEXT: s_add_u32 s24, s16, 0x70
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, s73
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s50
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, s51
-; GCN-HSA-NEXT: v_mov_b32_e32 v9, s25
-; GCN-HSA-NEXT: s_addc_u32 s25, s17, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v30, s46
-; GCN-HSA-NEXT: flat_store_dwordx4 v[28:29], v[12:15]
-; GCN-HSA-NEXT: v_mov_b32_e32 v16, s52
+; GCN-HSA-NEXT: v_mov_b32_e32 v29, s44
+; GCN-HSA-NEXT: flat_store_dwordx4 v[27:28], v[12:15]
+; GCN-HSA-NEXT: v_mov_b32_e32 v16, s50
; GCN-HSA-NEXT: v_mov_b32_e32 v14, s14
-; GCN-HSA-NEXT: s_add_u32 s14, s16, 0x60
-; GCN-HSA-NEXT: v_mov_b32_e32 v17, s53
-; GCN-HSA-NEXT: v_mov_b32_e32 v31, s47
-; GCN-HSA-NEXT: v_mov_b32_e32 v18, s57
-; GCN-HSA-NEXT: v_mov_b32_e32 v19, s45
-; GCN-HSA-NEXT: v_mov_b32_e32 v10, s30
+; GCN-HSA-NEXT: s_add_u32 s14, s16, 0xc0
+; GCN-HSA-NEXT: v_mov_b32_e32 v17, s51
+; GCN-HSA-NEXT: v_mov_b32_e32 v30, s45
+; GCN-HSA-NEXT: v_mov_b32_e32 v18, s61
+; GCN-HSA-NEXT: v_mov_b32_e32 v19, s59
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s38
; GCN-HSA-NEXT: v_mov_b32_e32 v15, s15
; GCN-HSA-NEXT: s_addc_u32 s15, s17, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v20, s70
-; GCN-HSA-NEXT: v_mov_b32_e32 v21, s71
-; GCN-HSA-NEXT: v_mov_b32_e32 v22, s34
-; GCN-HSA-NEXT: v_mov_b32_e32 v23, s35
-; GCN-HSA-NEXT: v_mov_b32_e32 v11, s31
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s44
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s43
-; GCN-HSA-NEXT: v_mov_b32_e32 v26, s26
-; GCN-HSA-NEXT: flat_store_dwordx4 v[30:31], v[16:19]
-; GCN-HSA-NEXT: v_mov_b32_e32 v27, s27
-; GCN-HSA-NEXT: v_mov_b32_e32 v16, s24
+; GCN-HSA-NEXT: v_mov_b32_e32 v20, s52
+; GCN-HSA-NEXT: v_mov_b32_e32 v21, s53
+; GCN-HSA-NEXT: v_mov_b32_e32 v22, s57
+; GCN-HSA-NEXT: v_mov_b32_e32 v23, s43
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s39
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s42
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s41
+; GCN-HSA-NEXT: v_mov_b32_e32 v26, s36
+; GCN-HSA-NEXT: flat_store_dwordx4 v[29:30], v[16:19]
+; GCN-HSA-NEXT: v_mov_b32_e32 v27, s37
+; GCN-HSA-NEXT: v_mov_b32_e32 v16, s30
; GCN-HSA-NEXT: v_mov_b32_e32 v19, s15
-; GCN-HSA-NEXT: v_mov_b32_e32 v6, s28
-; GCN-HSA-NEXT: v_mov_b32_e32 v7, s29
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s40
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s33
; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[20:23]
-; GCN-HSA-NEXT: v_mov_b32_e32 v12, s22
-; GCN-HSA-NEXT: v_mov_b32_e32 v10, s42
-; GCN-HSA-NEXT: v_mov_b32_e32 v11, s41
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, s23
-; GCN-HSA-NEXT: v_mov_b32_e32 v17, s25
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s28
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s34
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s35
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s29
+; GCN-HSA-NEXT: v_mov_b32_e32 v17, s31
; GCN-HSA-NEXT: v_mov_b32_e32 v18, s14
; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[0:3]
; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[4:7]
; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[8:11]
; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[12:15]
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s12
-; GCN-HSA-NEXT: s_add_u32 s12, s16, 0x50
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s13
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s12
+; GCN-HSA-NEXT: s_add_u32 s12, s16, 0xa0
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s13
; GCN-HSA-NEXT: s_addc_u32 s13, s17, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s12
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s40
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s39
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s26
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s27
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s13
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_nop 0
; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10
-; GCN-HSA-NEXT: s_add_u32 s10, s16, 64
+; GCN-HSA-NEXT: s_add_u32 s10, s16, 0x80
; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11
; GCN-HSA-NEXT: s_addc_u32 s11, s17, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s10
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s20
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s21
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s24
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s25
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s11
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_nop 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s8
-; GCN-HSA-NEXT: s_add_u32 s8, s16, 48
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s9
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s8
+; GCN-HSA-NEXT: s_add_u32 s8, s16, 0x60
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s9
; GCN-HSA-NEXT: s_addc_u32 s9, s17, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s38
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s37
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s22
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s23
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_nop 0
; GCN-HSA-NEXT: v_mov_b32_e32 v2, s6
-; GCN-HSA-NEXT: s_add_u32 s6, s16, 32
+; GCN-HSA-NEXT: s_add_u32 s6, s16, 64
; GCN-HSA-NEXT: v_mov_b32_e32 v3, s7
; GCN-HSA-NEXT: s_addc_u32 s7, s17, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s18
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s19
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s20
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s21
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_nop 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4
-; GCN-HSA-NEXT: s_add_u32 s4, s16, 16
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s4
+; GCN-HSA-NEXT: s_add_u32 s4, s16, 32
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s5
; GCN-HSA-NEXT: s_addc_u32 s5, s17, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s36
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s33
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s18
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s19
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s16
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
index b534c2c267fad..5c4bc95578bb4 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
@@ -6398,41 +6398,41 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
; GFX6-NOHSA-NEXT: s_mov_b32 s2, -1
; GFX6-NOHSA-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NOHSA-NEXT: s_lshr_b32 s6, s5, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s8, s4, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s10, s4, 24
-; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s4, 8
-; GFX6-NOHSA-NEXT: s_lshr_b32 s14, s5, 8
-; GFX6-NOHSA-NEXT: s_mov_b32 s16, s5
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
+; GFX6-NOHSA-NEXT: s_lshr_b32 s8, s5, 8
+; GFX6-NOHSA-NEXT: s_mov_b32 s10, s5
+; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s4, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s14, s4, 24
+; GFX6-NOHSA-NEXT: s_lshr_b32 s16, s4, 8
; GFX6-NOHSA-NEXT: s_bfe_i64 s[18:19], s[4:5], 0x80000
-; GFX6-NOHSA-NEXT: s_ashr_i32 s15, s5, 31
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX6-NOHSA-NEXT: s_ashr_i32 s17, s5, 31
; GFX6-NOHSA-NEXT: s_ashr_i32 s20, s5, 24
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[14:15], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[16:17], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s20
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s15
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s18
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s19
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s16
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s17
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s17
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s10
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s11
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s18
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s19
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s6
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s7
; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
-; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s8
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s9
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s10
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s11
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s8
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s9
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT: s_waitcnt expcnt(1)
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s12
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s13
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s14
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s15
; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s12
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s13
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s4
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s5
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0
; GFX6-NOHSA-NEXT: s_endpgm
;
; GFX7-HSA-LABEL: constant_sextload_v8i8_to_v8i64:
@@ -6445,11 +6445,11 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
; GFX7-HSA-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
; GFX7-HSA-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-HSA-NEXT: s_lshr_b32 s4, s3, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s6, s2, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s8, s2, 24
-; GFX7-HSA-NEXT: s_lshr_b32 s10, s2, 8
-; GFX7-HSA-NEXT: s_lshr_b32 s12, s3, 8
-; GFX7-HSA-NEXT: s_mov_b32 s14, s3
+; GFX7-HSA-NEXT: s_lshr_b32 s6, s3, 8
+; GFX7-HSA-NEXT: s_mov_b32 s8, s3
+; GFX7-HSA-NEXT: s_lshr_b32 s10, s2, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s12, s2, 24
+; GFX7-HSA-NEXT: s_lshr_b32 s14, s2, 8
; GFX7-HSA-NEXT: s_ashr_i32 s5, s3, 31
; GFX7-HSA-NEXT: s_bfe_i64 s[16:17], s[2:3], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
@@ -6465,32 +6465,32 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
; GFX7-HSA-NEXT: s_addc_u32 s3, s1, 0
; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s3
; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s2
-; GFX7-HSA-NEXT: s_add_u32 s2, s0, 16
+; GFX7-HSA-NEXT: s_add_u32 s2, s0, 32
; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s18
; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s5
; GFX7-HSA-NEXT: s_addc_u32 s3, s1, 0
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s3
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s6
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s8
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s9
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s2
+; GFX7-HSA-NEXT: s_add_u32 s2, s0, 16
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s8
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s9
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s7
+; GFX7-HSA-NEXT: s_addc_u32 s3, s1, 0
+; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s3
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s10
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s11
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s12
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s13
; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s2
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s0
-; GFX7-HSA-NEXT: s_add_u32 s0, s0, 32
; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s16
; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s17
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s10
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s11
-; GFX7-HSA-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s14
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s15
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s12
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s13
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s14
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s15
; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s0
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX7-HSA-NEXT: s_endpgm
@@ -6502,11 +6502,11 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
; GFX8-NOHSA-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
; GFX8-NOHSA-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NOHSA-NEXT: s_lshr_b32 s4, s3, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s6, s2, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s8, s2, 24
-; GFX8-NOHSA-NEXT: s_lshr_b32 s10, s2, 8
-; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s3, 8
-; GFX8-NOHSA-NEXT: s_mov_b32 s14, s3
+; GFX8-NOHSA-NEXT: s_lshr_b32 s6, s3, 8
+; GFX8-NOHSA-NEXT: s_mov_b32 s8, s3
+; GFX8-NOHSA-NEXT: s_lshr_b32 s10, s2, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s2, 24
+; GFX8-NOHSA-NEXT: s_lshr_b32 s14, s2, 8
; GFX8-NOHSA-NEXT: s_ashr_i32 s5, s3, 31
; GFX8-NOHSA-NEXT: s_bfe_i64 s[16:17], s[2:3], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
@@ -6522,32 +6522,32 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
; GFX8-NOHSA-NEXT: s_addc_u32 s3, s1, 0
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2
-; GFX8-NOHSA-NEXT: s_add_u32 s2, s0, 16
+; GFX8-NOHSA-NEXT: s_add_u32 s2, s0, 32
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s18
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s5
; GFX8-NOHSA-NEXT: s_addc_u32 s3, s1, 0
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s6
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s8
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s9
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2
+; GFX8-NOHSA-NEXT: s_add_u32 s2, s0, 16
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s8
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s9
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s6
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s7
+; GFX8-NOHSA-NEXT: s_addc_u32 s3, s1, 0
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s10
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s11
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s12
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s13
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0
-; GFX8-NOHSA-NEXT: s_add_u32 s0, s0, 32
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s16
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s17
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s10
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s11
-; GFX8-NOHSA-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s14
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s15
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s12
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s13
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s14
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s15
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: s_endpgm
@@ -6615,34 +6615,34 @@ define amdgpu_kernel void @constant_sextload_v8i8_to_v8i64(ptr addrspace(1) %out
; GFX12-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_lshr_b32 s4, s3, 16
-; GFX12-NEXT: s_lshr_b32 s6, s2, 16
-; GFX12-NEXT: s_lshr_b32 s8, s2, 24
-; GFX12-NEXT: s_lshr_b32 s10, s2, 8
-; GFX12-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GFX12-NEXT: s_lshr_b32 s12, s3, 8
-; GFX12-NEXT: s_mov_b32 s14, s3
+; GFX12-NEXT: s_lshr_b32 s6, s3, 8
+; GFX12-NEXT: s_mov_b32 s8, s3
+; GFX12-NEXT: s_lshr_b32 s10, s2, 16
+; GFX12-NEXT: s_lshr_b32 s12, s2, 24
; GFX12-NEXT: s_bfe_i64 s[16:17], s[2:3], 0x80000
; GFX12-NEXT: s_ashr_i32 s15, s3, 31
; GFX12-NEXT: s_ashr_i32 s18, s3, 24
-; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000
+; GFX12-NEXT: s_lshr_b32 s14, s2, 8
+; GFX12-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
; GFX12-NEXT: v_dual_mov_b32 v16, 0 :: v_dual_mov_b32 v3, s15
-; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v9, s7
-; GFX12-NEXT: v_dual_mov_b32 v8, s6 :: v_dual_mov_b32 v11, s9
-; GFX12-NEXT: v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v7, s11
-; GFX12-NEXT: s_bfe_i64 s[2:3], s[14:15], 0x80000
; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
; GFX12-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v5, s17
; GFX12-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v1, s5
-; GFX12-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v13, s3
-; GFX12-NEXT: v_dual_mov_b32 v12, s2 :: v_dual_mov_b32 v15, s13
-; GFX12-NEXT: v_mov_b32_e32 v14, s12
+; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v9, s9
+; GFX12-NEXT: s_bfe_i64 s[2:3], s[14:15], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v11, s7
+; GFX12-NEXT: v_dual_mov_b32 v10, s6 :: v_dual_mov_b32 v13, s11
+; GFX12-NEXT: v_dual_mov_b32 v12, s10 :: v_dual_mov_b32 v15, s13
+; GFX12-NEXT: v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v7, s3
+; GFX12-NEXT: v_mov_b32_e32 v6, s2
; GFX12-NEXT: s_clause 0x3
-; GFX12-NEXT: global_store_b128 v16, v[8:11], s[0:1] offset:16
-; GFX12-NEXT: global_store_b128 v16, v[4:7], s[0:1]
; GFX12-NEXT: global_store_b128 v16, v[0:3], s[0:1] offset:48
-; GFX12-NEXT: global_store_b128 v16, v[12:15], s[0:1] offset:32
+; GFX12-NEXT: global_store_b128 v16, v[8:11], s[0:1] offset:32
+; GFX12-NEXT: global_store_b128 v16, v[12:15], s[0:1] offset:16
+; GFX12-NEXT: global_store_b128 v16, v[4:7], s[0:1]
; GFX12-NEXT: s_endpgm
%load = load <8 x i8>, ptr addrspace(4) %in
%ext = sext <8 x i8> %load to <8 x i64>
@@ -7033,81 +7033,80 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
; GFX6-NOHSA-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NOHSA-NEXT: s_mov_b32 s2, -1
; GFX6-NOHSA-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s6, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s14, s6, 24
-; GFX6-NOHSA-NEXT: s_lshr_b32 s16, s6, 8
-; GFX6-NOHSA-NEXT: s_lshr_b32 s18, s4, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s20, s4, 24
-; GFX6-NOHSA-NEXT: s_lshr_b32 s22, s4, 8
-; GFX6-NOHSA-NEXT: s_lshr_b32 s24, s7, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s10, s7, 8
-; GFX6-NOHSA-NEXT: s_mov_b32 s26, s7
-; GFX6-NOHSA-NEXT: s_lshr_b32 s28, s5, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s30, s5, 8
-; GFX6-NOHSA-NEXT: s_mov_b32 s8, s5
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[34:35], s[4:5], 0x80000
+; GFX6-NOHSA-NEXT: s_lshr_b32 s10, s7, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s7, 8
+; GFX6-NOHSA-NEXT: s_mov_b32 s14, s7
+; GFX6-NOHSA-NEXT: s_lshr_b32 s16, s6, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s18, s6, 24
+; GFX6-NOHSA-NEXT: s_lshr_b32 s20, s6, 8
+; GFX6-NOHSA-NEXT: s_lshr_b32 s22, s5, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s24, s5, 8
+; GFX6-NOHSA-NEXT: s_mov_b32 s26, s5
+; GFX6-NOHSA-NEXT: s_lshr_b32 s28, s4, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s30, s4, 24
+; GFX6-NOHSA-NEXT: s_lshr_b32 s34, s4, 8
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[4:5], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[36:37], s[6:7], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GFX6-NOHSA-NEXT: s_ashr_i32 s29, s5, 31
-; GFX6-NOHSA-NEXT: s_ashr_i32 s31, s5, 24
; GFX6-NOHSA-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
-; GFX6-NOHSA-NEXT: s_ashr_i32 s33, s7, 31
+; GFX6-NOHSA-NEXT: s_ashr_i32 s31, s5, 31
+; GFX6-NOHSA-NEXT: s_ashr_i32 s33, s5, 24
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GFX6-NOHSA-NEXT: s_ashr_i32 s35, s7, 31
; GFX6-NOHSA-NEXT: s_ashr_i32 s38, s7, 24
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[30:31], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[28:29], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[34:35], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[30:31], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s36
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s37
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s34
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s35
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s38
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s33
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s26
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s27
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s12
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s13
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s14
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s15
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:80
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s38
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s35
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s14
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s15
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s36
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s37
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s33
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s31
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s26
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s27
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s10
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s11
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s31
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s29
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s8
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s9
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s12
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s13
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s16
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s17
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s18
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s19
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s20
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s21
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s8
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s9
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s22
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s23
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s18
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s19
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:80
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s20
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s21
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64
; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s24
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s25
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s10
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s11
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:96
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s22
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s23
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:48
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v16, s24
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v17, s25
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s28
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s29
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s6
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s7
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16
; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s6
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s7
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s4
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s5
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX6-NOHSA-NEXT: s_endpgm
;
; GFX7-HSA-LABEL: constant_sextload_v16i8_to_v16i64:
@@ -7119,33 +7118,31 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
; GFX7-HSA-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-HSA-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x0
; GFX7-HSA-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-HSA-NEXT: s_lshr_b32 s2, s6, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s8, s6, 24
-; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GFX7-HSA-NEXT: s_lshr_b32 s10, s6, 8
-; GFX7-HSA-NEXT: s_lshr_b32 s12, s4, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s14, s4, 24
-; GFX7-HSA-NEXT: s_lshr_b32 s16, s4, 8
-; GFX7-HSA-NEXT: s_lshr_b32 s18, s7, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s20, s7, 8
-; GFX7-HSA-NEXT: s_ashr_i32 s27, s5, 31
-; GFX7-HSA-NEXT: s_ashr_i32 s29, s5, 24
-; GFX7-HSA-NEXT: s_mov_b32 s22, s7
-; GFX7-HSA-NEXT: s_lshr_b32 s24, s5, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s26, s5, 8
-; GFX7-HSA-NEXT: s_mov_b32 s28, s5
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[8:9], 0x80000
+; GFX7-HSA-NEXT: s_lshr_b32 s8, s7, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s10, s7, 8
+; GFX7-HSA-NEXT: s_mov_b32 s12, s7
+; GFX7-HSA-NEXT: s_lshr_b32 s14, s6, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s16, s6, 24
+; GFX7-HSA-NEXT: s_lshr_b32 s18, s6, 8
+; GFX7-HSA-NEXT: s_lshr_b32 s20, s5, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s22, s5, 8
+; GFX7-HSA-NEXT: s_ashr_i32 s29, s5, 31
+; GFX7-HSA-NEXT: s_ashr_i32 s31, s5, 24
+; GFX7-HSA-NEXT: s_mov_b32 s24, s5
+; GFX7-HSA-NEXT: s_lshr_b32 s26, s4, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s28, s4, 24
+; GFX7-HSA-NEXT: s_lshr_b32 s30, s4, 8
+; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[4:5], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[4:5], s[8:9], 0x80000
; GFX7-HSA-NEXT: s_ashr_i32 s33, s7, 31
-; GFX7-HSA-NEXT: s_ashr_i32 s34, s7, 24
-; GFX7-HSA-NEXT: s_bfe_i64 s[30:31], s[4:5], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-HSA-NEXT: s_bfe_i64 s[4:5], s[28:29], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[26:27], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[8:9], s[24:25], 0x80000
+; GFX7-HSA-NEXT: s_ashr_i32 s36, s7, 24
+; GFX7-HSA-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-HSA-NEXT: s_bfe_i64 s[4:5], s[30:31], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[6:7], s[28:29], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[8:9], s[26:27], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
@@ -7153,70 +7150,73 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
; GFX7-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GFX7-HSA-NEXT: s_add_u32 s24, s0, 0x50
-; GFX7-HSA-NEXT: s_addc_u32 s25, s1, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT: s_add_u32 s6, s0, 64
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s24
-; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s7
-; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s25
-; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s6
-; GFX7-HSA-NEXT: s_add_u32 s6, s0, 16
+; GFX7-HSA-NEXT: s_add_u32 s26, s0, 0x70
+; GFX7-HSA-NEXT: s_addc_u32 s27, s1, 0
; GFX7-HSA-NEXT: v_mov_b32_e32 v6, s10
+; GFX7-HSA-NEXT: s_add_u32 s10, s0, 0x60
+; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s26
; GFX7-HSA-NEXT: v_mov_b32_e32 v7, s11
+; GFX7-HSA-NEXT: s_addc_u32 s11, s1, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s10
+; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s27
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s12
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s13
+; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s11
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s36
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s33
+; GFX7-HSA-NEXT: s_add_u32 s10, s0, 0x50
; GFX7-HSA-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; GFX7-HSA-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
-; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s12
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s13
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s14
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s15
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s30
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s31
+; GFX7-HSA-NEXT: s_addc_u32 s11, s1, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s10
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s14
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s15
; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s16
; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s17
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s0
-; GFX7-HSA-NEXT: s_add_u32 s6, s0, 0x70
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s11
+; GFX7-HSA-NEXT: s_add_u32 s10, s0, 64
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s18
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s19
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s34
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s33
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT: s_add_u32 s6, s0, 0x60
+; GFX7-HSA-NEXT: s_addc_u32 s11, s1, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s10
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s34
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s35
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s18
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s19
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s11
+; GFX7-HSA-NEXT: s_add_u32 s10, s0, 48
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT: s_add_u32 s6, s0, 48
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s22
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s23
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s20
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s21
-; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0
+; GFX7-HSA-NEXT: s_addc_u32 s11, s1, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s10
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s20
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s21
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s31
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s29
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s11
+; GFX7-HSA-NEXT: s_add_u32 s10, s0, 32
+; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX7-HSA-NEXT: s_addc_u32 s11, s1, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s10
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s24
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s25
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s22
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s23
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s11
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX7-HSA-NEXT: s_nop 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-HSA-NEXT: s_add_u32 s6, s0, 16
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s7
+; GFX7-HSA-NEXT: s_addc_u32 s7, s1, 0
; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT: s_add_u32 s0, s0, 32
; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s8
; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s9
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s29
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s27
; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT: s_addc_u32 s1, s1, 0
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s3
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s4
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s5
; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s0
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX7-HSA-NEXT: s_endpgm
@@ -7225,109 +7225,107 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
; GFX8-NOHSA: ; %bb.0:
; GFX8-NOHSA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NOHSA-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NOHSA-NEXT: s_load_dwordx4 s[8:11], s[2:3], 0x0
+; GFX8-NOHSA-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x0
; GFX8-NOHSA-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s10, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s14, s10, 24
-; GFX8-NOHSA-NEXT: s_lshr_b32 s16, s10, 8
-; GFX8-NOHSA-NEXT: s_lshr_b32 s18, s8, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s20, s8, 24
-; GFX8-NOHSA-NEXT: s_lshr_b32 s22, s8, 8
-; GFX8-NOHSA-NEXT: s_lshr_b32 s24, s11, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s26, s11, 8
-; GFX8-NOHSA-NEXT: s_mov_b32 s28, s11
-; GFX8-NOHSA-NEXT: s_lshr_b32 s6, s9, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s4, s9, 8
-; GFX8-NOHSA-NEXT: s_mov_b32 s2, s9
-; GFX8-NOHSA-NEXT: s_ashr_i32 s25, s9, 31
-; GFX8-NOHSA-NEXT: s_ashr_i32 s29, s9, 24
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[30:31], s[8:9], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[34:35], s[10:11], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[8:9], s[28:29], 0x80000
+; GFX8-NOHSA-NEXT: s_lshr_b32 s18, s7, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s20, s7, 8
+; GFX8-NOHSA-NEXT: s_mov_b32 s22, s7
+; GFX8-NOHSA-NEXT: s_lshr_b32 s24, s6, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s26, s6, 24
+; GFX8-NOHSA-NEXT: s_lshr_b32 s28, s6, 8
+; GFX8-NOHSA-NEXT: s_lshr_b32 s30, s5, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s16, s5, 8
+; GFX8-NOHSA-NEXT: s_mov_b32 s14, s5
+; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s4, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s10, s4, 24
+; GFX8-NOHSA-NEXT: s_lshr_b32 s8, s4, 8
+; GFX8-NOHSA-NEXT: s_ashr_i32 s19, s5, 31
+; GFX8-NOHSA-NEXT: s_ashr_i32 s31, s5, 24
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[2:3], s[4:5], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[4:5], s[30:31], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
-; GFX8-NOHSA-NEXT: s_ashr_i32 s28, s11, 31
-; GFX8-NOHSA-NEXT: s_ashr_i32 s33, s11, 24
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[10:11], s[24:25], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s12
-; GFX8-NOHSA-NEXT: s_add_u32 s12, s0, 0x50
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s13
-; GFX8-NOHSA-NEXT: s_addc_u32 s13, s1, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s12
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s14
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s15
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s13
-; GFX8-NOHSA-NEXT: s_add_u32 s12, s0, 64
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_addc_u32 s13, s1, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s12
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s34
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s35
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s16
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s17
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s13
-; GFX8-NOHSA-NEXT: s_add_u32 s12, s0, 16
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_addc_u32 s13, s1, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s12
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s18
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s19
+; GFX8-NOHSA-NEXT: s_ashr_i32 s30, s7, 31
+; GFX8-NOHSA-NEXT: s_ashr_i32 s33, s7, 24
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[6:7], s[18:19], 0x80000
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NOHSA-NEXT: s_add_u32 s6, s0, 0x70
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s7
+; GFX8-NOHSA-NEXT: s_addc_u32 s7, s1, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s33
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s30
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7
+; GFX8-NOHSA-NEXT: s_add_u32 s6, s0, 0x60
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: s_addc_u32 s7, s1, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s22
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s23
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s20
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s21
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s13
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s30
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s31
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s22
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s23
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_nop 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s10
-; GFX8-NOHSA-NEXT: s_add_u32 s10, s0, 0x70
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s11
-; GFX8-NOHSA-NEXT: s_addc_u32 s11, s1, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s10
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s33
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s28
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s11
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7
+; GFX8-NOHSA-NEXT: s_add_u32 s6, s0, 0x50
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_nop 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s8
-; GFX8-NOHSA-NEXT: s_add_u32 s8, s0, 0x60
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s9
-; GFX8-NOHSA-NEXT: s_addc_u32 s9, s1, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s8
+; GFX8-NOHSA-NEXT: s_addc_u32 s7, s1, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s24
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s25
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s26
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s27
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s9
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7
+; GFX8-NOHSA-NEXT: s_add_u32 s6, s0, 64
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_nop 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s6
-; GFX8-NOHSA-NEXT: s_add_u32 s6, s0, 48
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NOHSA-NEXT: s_addc_u32 s7, s1, 0
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6
-; GFX8-NOHSA-NEXT: s_add_u32 s0, s0, 32
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s29
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s25
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s34
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s35
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s28
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s29
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7
-; GFX8-NOHSA-NEXT: s_addc_u32 s1, s1, 0
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: s_nop 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NOHSA-NEXT: s_add_u32 s4, s0, 48
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s5
+; GFX8-NOHSA-NEXT: s_addc_u32 s5, s1, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s31
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s19
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s5
+; GFX8-NOHSA-NEXT: s_add_u32 s4, s0, 32
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: s_addc_u32 s5, s1, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s14
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s15
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s16
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s17
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s5
+; GFX8-NOHSA-NEXT: s_add_u32 s4, s0, 16
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: s_addc_u32 s5, s1, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s12
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s13
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s10
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s11
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s5
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s5
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s8
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s9
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: s_endpgm
@@ -7437,64 +7435,64 @@ define amdgpu_kernel void @constant_sextload_v16i8_to_v16i64(ptr addrspace(1) %o
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_lshr_b32 s2, s6, 16
-; GFX12-NEXT: s_lshr_b32 s8, s6, 24
-; GFX12-NEXT: s_lshr_b32 s10, s6, 8
-; GFX12-NEXT: s_bfe_i64 s[30:31], s[4:5], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GFX12-NEXT: s_lshr_b32 s12, s4, 16
-; GFX12-NEXT: s_lshr_b32 s14, s4, 24
+; GFX12-NEXT: s_lshr_b32 s8, s7, 16
+; GFX12-NEXT: s_lshr_b32 s10, s7, 8
+; GFX12-NEXT: s_mov_b32 s12, s7
; GFX12-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v1, s35
-; GFX12-NEXT: s_lshr_b32 s16, s4, 8
-; GFX12-NEXT: v_dual_mov_b32 v4, s30 :: v_dual_mov_b32 v9, s3
-; GFX12-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v11, s9
-; GFX12-NEXT: v_dual_mov_b32 v10, s8 :: v_dual_mov_b32 v3, s11
-; GFX12-NEXT: s_lshr_b32 s18, s7, 16
-; GFX12-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v0, s34 :: v_dual_mov_b32 v5, s31
-; GFX12-NEXT: v_dual_mov_b32 v2, s10 :: v_dual_mov_b32 v13, s13
-; GFX12-NEXT: s_lshr_b32 s20, s7, 8
-; GFX12-NEXT: s_mov_b32 s22, s7
-; GFX12-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GFX12-NEXT: s_lshr_b32 s24, s5, 16
; GFX12-NEXT: s_ashr_i32 s33, s7, 31
; GFX12-NEXT: s_ashr_i32 s36, s7, 24
+; GFX12-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
+; GFX12-NEXT: s_lshr_b32 s14, s6, 16
+; GFX12-NEXT: s_lshr_b32 s16, s6, 24
+; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v3, s33
+; GFX12-NEXT: s_lshr_b32 s18, s6, 8
+; GFX12-NEXT: v_dual_mov_b32 v2, s36 :: v_dual_mov_b32 v5, s35
+; GFX12-NEXT: v_dual_mov_b32 v4, s34 :: v_dual_mov_b32 v1, s9
+; GFX12-NEXT: v_dual_mov_b32 v0, s8 :: v_dual_mov_b32 v9, s13
+; GFX12-NEXT: s_lshr_b32 s20, s5, 16
+; GFX12-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v8, s12 :: v_dual_mov_b32 v11, s11
+; GFX12-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v13, s15
+; GFX12-NEXT: s_lshr_b32 s22, s5, 8
+; GFX12-NEXT: s_mov_b32 s24, s5
; GFX12-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v15, s15
-; GFX12-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v7, s17
-; GFX12-NEXT: s_lshr_b32 s26, s5, 8
-; GFX12-NEXT: s_mov_b32 s28, s5
-; GFX12-NEXT: s_ashr_i32 s27, s5, 31
-; GFX12-NEXT: s_ashr_i32 s29, s5, 24
-; GFX12-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GFX12-NEXT: s_lshr_b32 s26, s4, 16
+; GFX12-NEXT: s_lshr_b32 s28, s4, 24
+; GFX12-NEXT: s_ashr_i32 s29, s5, 31
+; GFX12-NEXT: s_ashr_i32 s31, s5, 24
; GFX12-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
+; GFX12-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v15, s17
+; GFX12-NEXT: v_dual_mov_b32 v14, s16 :: v_dual_mov_b32 v7, s19
+; GFX12-NEXT: s_lshr_b32 s30, s4, 8
; GFX12-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GFX12-NEXT: v_mov_b32_e32 v6, s18
+; GFX12-NEXT: s_bfe_i64 s[6:7], s[28:29], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
; GFX12-NEXT: s_clause 0x1
-; GFX12-NEXT: global_store_b128 v24, v[8:11], s[0:1] offset:80
-; GFX12-NEXT: global_store_b128 v24, v[0:3], s[0:1] offset:64
-; GFX12-NEXT: v_dual_mov_b32 v0, s18 :: v_dual_mov_b32 v3, s33
-; GFX12-NEXT: v_dual_mov_b32 v1, s19 :: v_dual_mov_b32 v2, s36
-; GFX12-NEXT: v_mov_b32_e32 v9, s23
-; GFX12-NEXT: s_bfe_i64 s[4:5], s[28:29], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[6:7], s[26:27], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v11, s21
-; GFX12-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v17, s25
-; GFX12-NEXT: v_dual_mov_b32 v16, s24 :: v_dual_mov_b32 v19, s27
-; GFX12-NEXT: v_dual_mov_b32 v18, s29 :: v_dual_mov_b32 v21, s5
-; GFX12-NEXT: v_dual_mov_b32 v20, s4 :: v_dual_mov_b32 v23, s7
-; GFX12-NEXT: v_mov_b32_e32 v22, s6
-; GFX12-NEXT: s_clause 0x5
-; GFX12-NEXT: global_store_b128 v24, v[12:15], s[0:1] offset:16
-; GFX12-NEXT: global_store_b128 v24, v[4:7], s[0:1]
; GFX12-NEXT: global_store_b128 v24, v[0:3], s[0:1] offset:112
; GFX12-NEXT: global_store_b128 v24, v[8:11], s[0:1] offset:96
-; GFX12-NEXT: global_store_b128 v24, v[16:19], s[0:1] offset:48
-; GFX12-NEXT: global_store_b128 v24, v[20:23], s[0:1] offset:32
+; GFX12-NEXT: v_dual_mov_b32 v0, s20 :: v_dual_mov_b32 v3, s29
+; GFX12-NEXT: v_dual_mov_b32 v1, s21 :: v_dual_mov_b32 v2, s31
+; GFX12-NEXT: v_mov_b32_e32 v9, s25
+; GFX12-NEXT: s_bfe_i64 s[2:3], s[4:5], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[4:5], s[30:31], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v11, s23
+; GFX12-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v17, s27
+; GFX12-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v19, s7
+; GFX12-NEXT: v_dual_mov_b32 v18, s6 :: v_dual_mov_b32 v21, s3
+; GFX12-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v23, s5
+; GFX12-NEXT: v_mov_b32_e32 v22, s4
+; GFX12-NEXT: s_clause 0x5
+; GFX12-NEXT: global_store_b128 v24, v[12:15], s[0:1] offset:80
+; GFX12-NEXT: global_store_b128 v24, v[4:7], s[0:1] offset:64
+; GFX12-NEXT: global_store_b128 v24, v[0:3], s[0:1] offset:48
+; GFX12-NEXT: global_store_b128 v24, v[8:11], s[0:1] offset:32
+; GFX12-NEXT: global_store_b128 v24, v[16:19], s[0:1] offset:16
+; GFX12-NEXT: global_store_b128 v24, v[20:23], s[0:1]
; GFX12-NEXT: s_endpgm
%load = load <16 x i8>, ptr addrspace(4) %in
%ext = sext <16 x i8> %load to <16 x i64>
@@ -8206,157 +8204,157 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
; GFX6-NOHSA-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NOHSA-NEXT: s_load_dwordx8 s[0:7], s[10:11], 0x0
; GFX6-NOHSA-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NOHSA-NEXT: s_lshr_b32 s14, s7, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s22, s7, 8
+; GFX6-NOHSA-NEXT: s_mov_b32 s30, s7
; GFX6-NOHSA-NEXT: s_lshr_b32 s28, s6, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s30, s6, 24
-; GFX6-NOHSA-NEXT: s_lshr_b32 s20, s6, 8
-; GFX6-NOHSA-NEXT: s_lshr_b32 s26, s4, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s22, s4, 24
-; GFX6-NOHSA-NEXT: s_lshr_b32 s24, s4, 8
-; GFX6-NOHSA-NEXT: s_lshr_b32 s10, s2, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s2, 24
-; GFX6-NOHSA-NEXT: s_lshr_b32 s14, s2, 8
-; GFX6-NOHSA-NEXT: s_lshr_b32 s16, s0, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s18, s0, 24
-; GFX6-NOHSA-NEXT: s_mov_b32 s34, s7
+; GFX6-NOHSA-NEXT: s_lshr_b32 s24, s6, 24
+; GFX6-NOHSA-NEXT: s_lshr_b32 s26, s6, 8
+; GFX6-NOHSA-NEXT: s_lshr_b32 s10, s5, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s12, s5, 8
+; GFX6-NOHSA-NEXT: s_mov_b32 s34, s5
+; GFX6-NOHSA-NEXT: s_lshr_b32 s16, s4, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s18, s4, 24
+; GFX6-NOHSA-NEXT: s_lshr_b32 s20, s4, 8
; GFX6-NOHSA-NEXT: s_ashr_i32 s11, s1, 31
; GFX6-NOHSA-NEXT: s_ashr_i32 s13, s1, 24
-; GFX6-NOHSA-NEXT: s_ashr_i32 s15, s3, 31
-; GFX6-NOHSA-NEXT: s_ashr_i32 s17, s3, 24
-; GFX6-NOHSA-NEXT: s_ashr_i32 s33, s5, 31
-; GFX6-NOHSA-NEXT: s_ashr_i32 s49, s5, 24
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[38:39], s[34:35], 0x80000
-; GFX6-NOHSA-NEXT: s_ashr_i32 s19, s7, 31
-; GFX6-NOHSA-NEXT: s_ashr_i32 s21, s7, 24
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[52:53], s[30:31], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[54:55], s[28:29], 0x80000
-; GFX6-NOHSA-NEXT: s_lshr_b32 s28, s0, 8
-; GFX6-NOHSA-NEXT: s_lshr_b32 s30, s7, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s34, s7, 8
-; GFX6-NOHSA-NEXT: s_lshr_b32 s36, s5, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s40, s5, 8
-; GFX6-NOHSA-NEXT: s_mov_b32 s46, s5
-; GFX6-NOHSA-NEXT: s_lshr_b32 s42, s3, 16
-; GFX6-NOHSA-NEXT: s_lshr_b32 s44, s3, 8
-; GFX6-NOHSA-NEXT: s_mov_b32 s50, s3
-; GFX6-NOHSA-NEXT: s_lshr_b32 s48, s1, 16
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[56:57], s[0:1], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[58:59], s[4:5], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[60:61], s[6:7], 0x80000
-; GFX6-NOHSA-NEXT: s_lshr_b32 s4, s1, 8
-; GFX6-NOHSA-NEXT: s_mov_b32 s6, s1
+; GFX6-NOHSA-NEXT: s_ashr_i32 s17, s3, 31
+; GFX6-NOHSA-NEXT: s_ashr_i32 s19, s3, 24
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[56:57], s[34:35], 0x80000
+; GFX6-NOHSA-NEXT: s_ashr_i32 s21, s5, 31
+; GFX6-NOHSA-NEXT: s_ashr_i32 s23, s5, 24
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[58:59], s[30:31], 0x80000
+; GFX6-NOHSA-NEXT: s_ashr_i32 s25, s7, 31
+; GFX6-NOHSA-NEXT: s_ashr_i32 s27, s7, 24
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[60:61], s[14:15], 0x80000
+; GFX6-NOHSA-NEXT: s_lshr_b32 s30, s3, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s34, s3, 8
+; GFX6-NOHSA-NEXT: s_mov_b32 s44, s3
+; GFX6-NOHSA-NEXT: s_lshr_b32 s36, s2, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s38, s2, 24
+; GFX6-NOHSA-NEXT: s_lshr_b32 s40, s2, 8
+; GFX6-NOHSA-NEXT: s_lshr_b32 s42, s1, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s46, s1, 8
+; GFX6-NOHSA-NEXT: s_mov_b32 s52, s1
+; GFX6-NOHSA-NEXT: s_lshr_b32 s48, s0, 16
+; GFX6-NOHSA-NEXT: s_lshr_b32 s50, s0, 24
+; GFX6-NOHSA-NEXT: s_lshr_b32 s54, s0, 8
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[14:15], s[0:1], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[62:63], s[4:5], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[2:3], 0x80000
; GFX6-NOHSA-NEXT: s_mov_b32 s0, s8
; GFX6-NOHSA-NEXT: s_mov_b32 s1, s9
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s60
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s61
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s58
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s59
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s2
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s3
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s56
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s57
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v16, s38
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v17, s39
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v18, s54
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v19, s55
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v20, s52
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v21, s53
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v22, s21
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v23, s19
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s58
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s59
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s6
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s7
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s56
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s57
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s62
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s63
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s60
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s61
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v16, s27
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v17, s25
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v18, s23
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v19, s21
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v20, s19
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v21, s17
; GFX6-NOHSA-NEXT: s_mov_b32 s3, 0xf000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[26:27], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[28:29], 0x80000
; GFX6-NOHSA-NEXT: s_mov_b32 s2, -1
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v24, s8
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:208
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v22, s6
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:240
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[26:27], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s20
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s21
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v25, s9
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v26, s22
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v27, s23
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s24
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s25
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:144
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[20:21], s[6:7], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[22:23], s[50:51], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s22
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s23
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v23, s7
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v24, s24
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v25, s25
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s8
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s9
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:208
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[54:55], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[50:51], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[22:23], s[48:49], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[24:25], s[46:47], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[6:7], s[48:49], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[8:9], s[44:45], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[26:27], s[42:43], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[38:39], s[40:41], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[28:29], s[40:41], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
; GFX6-NOHSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[40:41], s[16:17], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[42:43], s[14:15], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[44:45], s[12:13], 0x80000
-; GFX6-NOHSA-NEXT: s_bfe_i64 s[46:47], s[10:11], 0x80000
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:128
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[40:41], s[12:13], 0x80000
+; GFX6-NOHSA-NEXT: s_bfe_i64 s[42:43], s[10:11], 0x80000
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:192
; GFX6-NOHSA-NEXT: s_waitcnt expcnt(2)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s46
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s47
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s44
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s45
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s44
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s45
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v16, s42
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v17, s43
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:176
+; GFX6-NOHSA-NEXT: s_waitcnt expcnt(1)
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s4
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s5
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s40
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s41
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:160
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s16
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s17
; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s49
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s33
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s42
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s43
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s40
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s41
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s18
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s19
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s18
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s19
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:144
+; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s13
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s11
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s20
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s21
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:128
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s52
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s53
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v18, s30
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v19, s31
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:112
+; GFX6-NOHSA-NEXT: s_waitcnt expcnt(1)
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s14
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s15
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s34
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s35
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v4, s24
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v5, s25
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s28
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s29
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v8, s17
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v9, s15
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v20, s30
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v21, s31
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:240
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v10, s22
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v11, s23
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v18, s34
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v19, s35
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:224
-; GFX6-NOHSA-NEXT: s_waitcnt expcnt(2)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v14, s13
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v15, s11
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s36
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s37
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176
-; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s21
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s38
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s39
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:160
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s38
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s39
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s28
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s29
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v6, s26
; GFX6-NOHSA-NEXT: v_mov_b32_e32 v7, s27
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s8
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s9
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:96
-; GFX6-NOHSA-NEXT: s_waitcnt expcnt(0)
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s6
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s7
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s4
-; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s5
-; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:48
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v12, s24
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v13, s25
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:32
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v0, s22
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v1, s23
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v2, s8
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v3, s9
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v16, s6
+; GFX6-NOHSA-NEXT: v_mov_b32_e32 v17, s7
+; GFX6-NOHSA-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0
; GFX6-NOHSA-NEXT: s_endpgm
;
; GFX7-HSA-LABEL: constant_sextload_v32i8_to_v32i64:
@@ -8368,212 +8366,211 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
; GFX7-HSA-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-HSA-NEXT: s_load_dwordx8 s[0:7], s[10:11], 0x0
; GFX7-HSA-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-HSA-NEXT: s_lshr_b32 s12, s6, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s10, s6, 24
+; GFX7-HSA-NEXT: s_lshr_b32 s10, s7, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s40, s7, 8
+; GFX7-HSA-NEXT: s_mov_b32 s42, s7
+; GFX7-HSA-NEXT: s_lshr_b32 s44, s6, 16
+; GFX7-HSA-NEXT: s_ashr_i32 s41, s1, 24
+; GFX7-HSA-NEXT: s_ashr_i32 s43, s3, 31
+; GFX7-HSA-NEXT: s_ashr_i32 s45, s3, 24
+; GFX7-HSA-NEXT: s_lshr_b32 s48, s6, 24
+; GFX7-HSA-NEXT: s_lshr_b32 s50, s6, 8
+; GFX7-HSA-NEXT: s_lshr_b32 s52, s5, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s46, s5, 8
+; GFX7-HSA-NEXT: s_mov_b32 s54, s5
+; GFX7-HSA-NEXT: s_lshr_b32 s38, s4, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s36, s4, 24
+; GFX7-HSA-NEXT: s_lshr_b32 s34, s4, 8
+; GFX7-HSA-NEXT: s_lshr_b32 s28, s3, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s24, s3, 8
+; GFX7-HSA-NEXT: s_mov_b32 s26, s3
+; GFX7-HSA-NEXT: s_lshr_b32 s22, s2, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s20, s2, 24
+; GFX7-HSA-NEXT: s_lshr_b32 s18, s2, 8
+; GFX7-HSA-NEXT: s_lshr_b32 s14, s1, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s56, s1, 8
+; GFX7-HSA-NEXT: s_mov_b32 s12, s1
+; GFX7-HSA-NEXT: s_lshr_b32 s58, s0, 16
+; GFX7-HSA-NEXT: s_lshr_b32 s60, s0, 24
+; GFX7-HSA-NEXT: s_lshr_b32 s62, s0, 8
+; GFX7-HSA-NEXT: s_bfe_i64 s[16:17], s[2:3], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[10:11], 0x80000
; GFX7-HSA-NEXT: s_ashr_i32 s33, s1, 31
-; GFX7-HSA-NEXT: s_ashr_i32 s37, s1, 24
-; GFX7-HSA-NEXT: s_lshr_b32 s34, s0, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s30, s0, 24
-; GFX7-HSA-NEXT: s_lshr_b32 s28, s0, 8
-; GFX7-HSA-NEXT: s_lshr_b32 s64, s1, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s66, s1, 8
-; GFX7-HSA-NEXT: s_mov_b32 s68, s1
-; GFX7-HSA-NEXT: s_bfe_i64 s[22:23], s[0:1], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[0:1], s[12:13], 0x80000
-; GFX7-HSA-NEXT: s_lshr_b32 s36, s6, 8
-; GFX7-HSA-NEXT: s_lshr_b32 s40, s4, 16
-; GFX7-HSA-NEXT: s_ashr_i32 s41, s3, 31
-; GFX7-HSA-NEXT: s_lshr_b32 s50, s4, 24
-; GFX7-HSA-NEXT: s_lshr_b32 s52, s4, 8
-; GFX7-HSA-NEXT: s_lshr_b32 s54, s2, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s56, s2, 24
-; GFX7-HSA-NEXT: s_lshr_b32 s42, s2, 8
-; GFX7-HSA-NEXT: s_lshr_b32 s26, s7, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s20, s7, 8
-; GFX7-HSA-NEXT: s_mov_b32 s24, s7
-; GFX7-HSA-NEXT: s_lshr_b32 s18, s5, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s14, s5, 8
-; GFX7-HSA-NEXT: s_mov_b32 s16, s5
-; GFX7-HSA-NEXT: s_lshr_b32 s58, s3, 16
-; GFX7-HSA-NEXT: s_lshr_b32 s60, s3, 8
-; GFX7-HSA-NEXT: s_mov_b32 s62, s3
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-HSA-NEXT: s_bfe_i64 s[0:1], s[10:11], 0x80000
-; GFX7-HSA-NEXT: s_ashr_i32 s44, s3, 24
-; GFX7-HSA-NEXT: s_ashr_i32 s45, s5, 31
-; GFX7-HSA-NEXT: s_ashr_i32 s46, s5, 24
-; GFX7-HSA-NEXT: s_ashr_i32 s47, s7, 31
-; GFX7-HSA-NEXT: s_ashr_i32 s48, s7, 24
-; GFX7-HSA-NEXT: s_bfe_i64 s[38:39], s[2:3], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[70:71], s[4:5], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[72:73], s[6:7], 0x80000
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[68:69], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[0:1], s[66:67], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[4:5], s[64:65], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[10:11], s[62:63], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[6:7], s[60:61], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[12:13], s[58:59], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
+; GFX7-HSA-NEXT: s_ashr_i32 s66, s5, 31
+; GFX7-HSA-NEXT: s_ashr_i32 s67, s5, 24
+; GFX7-HSA-NEXT: s_ashr_i32 s68, s7, 31
+; GFX7-HSA-NEXT: s_ashr_i32 s69, s7, 24
+; GFX7-HSA-NEXT: s_bfe_i64 s[0:1], s[0:1], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[30:31], s[4:5], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[64:65], s[6:7], 0x80000
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-HSA-NEXT: s_bfe_i64 s[2:3], s[62:63], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[4:5], s[60:61], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[6:7], s[58:59], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[10:11], s[56:57], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000
; GFX7-HSA-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[58:59], s[40:41], 0x80000
-; GFX7-HSA-NEXT: s_bfe_i64 s[60:61], s[36:37], 0x80000
-; GFX7-HSA-NEXT: s_add_u32 s62, s8, 0xd0
+; GFX7-HSA-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[56:57], s[44:45], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[58:59], s[42:43], 0x80000
+; GFX7-HSA-NEXT: s_bfe_i64 s[60:61], s[40:41], 0x80000
+; GFX7-HSA-NEXT: s_add_u32 s62, s8, 0xf0
; GFX7-HSA-NEXT: s_addc_u32 s63, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s58
+; GFX7-HSA-NEXT: s_add_u32 s58, s8, 0xe0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s59
+; GFX7-HSA-NEXT: s_addc_u32 s59, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s48
+; GFX7-HSA-NEXT: s_add_u32 s48, s8, 0xd0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s49
+; GFX7-HSA-NEXT: s_addc_u32 s49, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v26, s48
+; GFX7-HSA-NEXT: v_mov_b32_e32 v27, s49
+; GFX7-HSA-NEXT: s_add_u32 s48, s8, 0xc0
+; GFX7-HSA-NEXT: s_addc_u32 s49, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v28, s48
+; GFX7-HSA-NEXT: v_mov_b32_e32 v18, s62
+; GFX7-HSA-NEXT: v_mov_b32_e32 v29, s49
+; GFX7-HSA-NEXT: s_add_u32 s48, s8, 0xb0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v19, s63
+; GFX7-HSA-NEXT: s_addc_u32 s49, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s69
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s68
+; GFX7-HSA-NEXT: v_mov_b32_e32 v24, s58
+; GFX7-HSA-NEXT: flat_store_dwordx4 v[18:19], v[0:3]
; GFX7-HSA-NEXT: v_mov_b32_e32 v6, s60
-; GFX7-HSA-NEXT: s_add_u32 s60, s8, 0xc0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s38
+; GFX7-HSA-NEXT: s_add_u32 s38, s8, 0xa0
; GFX7-HSA-NEXT: v_mov_b32_e32 v7, s61
-; GFX7-HSA-NEXT: s_addc_u32 s61, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s50
-; GFX7-HSA-NEXT: s_add_u32 s50, s8, 0x90
-; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s51
-; GFX7-HSA-NEXT: s_addc_u32 s51, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v26, s50
-; GFX7-HSA-NEXT: v_mov_b32_e32 v27, s51
-; GFX7-HSA-NEXT: s_add_u32 s50, s8, 0x80
-; GFX7-HSA-NEXT: s_addc_u32 s51, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v22, s62
-; GFX7-HSA-NEXT: v_mov_b32_e32 v20, s38
-; GFX7-HSA-NEXT: s_add_u32 s38, s8, 0x50
-; GFX7-HSA-NEXT: v_mov_b32_e32 v23, s63
-; GFX7-HSA-NEXT: v_mov_b32_e32 v21, s39
+; GFX7-HSA-NEXT: v_mov_b32_e32 v25, s59
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s39
; GFX7-HSA-NEXT: s_addc_u32 s39, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v24, s60
-; GFX7-HSA-NEXT: flat_store_dwordx4 v[22:23], v[0:3]
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s72
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s34
-; GFX7-HSA-NEXT: s_add_u32 s34, s8, 64
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s73
-; GFX7-HSA-NEXT: v_mov_b32_e32 v25, s61
-; GFX7-HSA-NEXT: v_mov_b32_e32 v30, s38
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s35
-; GFX7-HSA-NEXT: s_addc_u32 s35, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s58
-; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s59
-; GFX7-HSA-NEXT: v_mov_b32_e32 v16, s54
-; GFX7-HSA-NEXT: v_mov_b32_e32 v17, s55
-; GFX7-HSA-NEXT: v_mov_b32_e32 v18, s56
-; GFX7-HSA-NEXT: v_mov_b32_e32 v19, s57
-; GFX7-HSA-NEXT: v_mov_b32_e32 v31, s39
; GFX7-HSA-NEXT: flat_store_dwordx4 v[24:25], v[4:7]
+; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s56
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s30
+; GFX7-HSA-NEXT: s_add_u32 s30, s8, 0x90
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s31
+; GFX7-HSA-NEXT: s_addc_u32 s31, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v24, s30
+; GFX7-HSA-NEXT: v_mov_b32_e32 v25, s31
+; GFX7-HSA-NEXT: s_add_u32 s30, s8, 0x80
+; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s57
+; GFX7-HSA-NEXT: s_addc_u32 s31, s9, 0
; GFX7-HSA-NEXT: flat_store_dwordx4 v[26:27], v[8:11]
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s22
-; GFX7-HSA-NEXT: s_add_u32 s22, s8, 16
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s23
-; GFX7-HSA-NEXT: s_addc_u32 s23, s9, 0
-; GFX7-HSA-NEXT: flat_store_dwordx4 v[30:31], v[16:19]
-; GFX7-HSA-NEXT: v_mov_b32_e32 v24, s34
-; GFX7-HSA-NEXT: v_mov_b32_e32 v16, s22
-; GFX7-HSA-NEXT: v_mov_b32_e32 v17, s23
-; GFX7-HSA-NEXT: s_add_u32 s22, s8, 0xf0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s30
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s31
-; GFX7-HSA-NEXT: s_addc_u32 s23, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v18, s22
-; GFX7-HSA-NEXT: v_mov_b32_e32 v28, s50
-; GFX7-HSA-NEXT: v_mov_b32_e32 v22, s42
-; GFX7-HSA-NEXT: v_mov_b32_e32 v23, s43
-; GFX7-HSA-NEXT: v_mov_b32_e32 v25, s35
-; GFX7-HSA-NEXT: v_mov_b32_e32 v19, s23
-; GFX7-HSA-NEXT: s_add_u32 s22, s8, 0xe0
-; GFX7-HSA-NEXT: flat_store_dwordx4 v[16:17], v[0:3]
-; GFX7-HSA-NEXT: v_mov_b32_e32 v12, s70
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s8
-; GFX7-HSA-NEXT: v_mov_b32_e32 v13, s71
-; GFX7-HSA-NEXT: v_mov_b32_e32 v14, s52
-; GFX7-HSA-NEXT: v_mov_b32_e32 v15, s53
-; GFX7-HSA-NEXT: v_mov_b32_e32 v29, s51
-; GFX7-HSA-NEXT: v_mov_b32_e32 v6, s28
-; GFX7-HSA-NEXT: v_mov_b32_e32 v7, s29
-; GFX7-HSA-NEXT: flat_store_dwordx4 v[24:25], v[20:23]
-; GFX7-HSA-NEXT: s_addc_u32 s23, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v20, s22
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s9
+; GFX7-HSA-NEXT: v_mov_b32_e32 v12, s64
+; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s28
+; GFX7-HSA-NEXT: s_add_u32 s28, s8, 0x70
+; GFX7-HSA-NEXT: v_mov_b32_e32 v13, s65
+; GFX7-HSA-NEXT: v_mov_b32_e32 v14, s50
+; GFX7-HSA-NEXT: v_mov_b32_e32 v15, s51
+; GFX7-HSA-NEXT: v_mov_b32_e32 v30, s48
+; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s29
+; GFX7-HSA-NEXT: s_addc_u32 s29, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v16, s52
+; GFX7-HSA-NEXT: v_mov_b32_e32 v17, s53
+; GFX7-HSA-NEXT: v_mov_b32_e32 v31, s49
+; GFX7-HSA-NEXT: v_mov_b32_e32 v18, s67
+; GFX7-HSA-NEXT: v_mov_b32_e32 v19, s66
+; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s38
; GFX7-HSA-NEXT: flat_store_dwordx4 v[28:29], v[12:15]
-; GFX7-HSA-NEXT: v_mov_b32_e32 v8, s26
-; GFX7-HSA-NEXT: v_mov_b32_e32 v9, s27
-; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s48
-; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s47
-; GFX7-HSA-NEXT: v_mov_b32_e32 v12, s24
-; GFX7-HSA-NEXT: v_mov_b32_e32 v13, s25
-; GFX7-HSA-NEXT: v_mov_b32_e32 v21, s23
-; GFX7-HSA-NEXT: v_mov_b32_e32 v14, s20
-; GFX7-HSA-NEXT: v_mov_b32_e32 v15, s21
-; GFX7-HSA-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
-; GFX7-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11]
-; GFX7-HSA-NEXT: flat_store_dwordx4 v[20:21], v[12:15]
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s18
-; GFX7-HSA-NEXT: s_add_u32 s18, s8, 0xb0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s19
-; GFX7-HSA-NEXT: s_addc_u32 s19, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s18
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s46
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s45
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s19
+; GFX7-HSA-NEXT: v_mov_b32_e32 v20, s54
+; GFX7-HSA-NEXT: v_mov_b32_e32 v14, s24
+; GFX7-HSA-NEXT: s_add_u32 s24, s8, 0x60
+; GFX7-HSA-NEXT: v_mov_b32_e32 v21, s55
+; GFX7-HSA-NEXT: v_mov_b32_e32 v22, s46
+; GFX7-HSA-NEXT: v_mov_b32_e32 v23, s47
+; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s39
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s36
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s37
+; GFX7-HSA-NEXT: v_mov_b32_e32 v26, s30
+; GFX7-HSA-NEXT: flat_store_dwordx4 v[30:31], v[16:19]
+; GFX7-HSA-NEXT: v_mov_b32_e32 v15, s25
+; GFX7-HSA-NEXT: v_mov_b32_e32 v16, s28
+; GFX7-HSA-NEXT: s_addc_u32 s25, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v18, s24
+; GFX7-HSA-NEXT: v_mov_b32_e32 v27, s31
+; GFX7-HSA-NEXT: v_mov_b32_e32 v6, s34
+; GFX7-HSA-NEXT: v_mov_b32_e32 v7, s35
+; GFX7-HSA-NEXT: flat_store_dwordx4 v[10:11], v[20:23]
+; GFX7-HSA-NEXT: v_mov_b32_e32 v12, s26
+; GFX7-HSA-NEXT: v_mov_b32_e32 v10, s45
+; GFX7-HSA-NEXT: v_mov_b32_e32 v11, s43
+; GFX7-HSA-NEXT: v_mov_b32_e32 v13, s27
+; GFX7-HSA-NEXT: v_mov_b32_e32 v17, s29
+; GFX7-HSA-NEXT: v_mov_b32_e32 v19, s25
+; GFX7-HSA-NEXT: flat_store_dwordx4 v[24:25], v[0:3]
+; GFX7-HSA-NEXT: flat_store_dwordx4 v[26:27], v[4:7]
+; GFX7-HSA-NEXT: flat_store_dwordx4 v[16:17], v[8:11]
+; GFX7-HSA-NEXT: flat_store_dwordx4 v[18:19], v[12:15]
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s20
+; GFX7-HSA-NEXT: s_add_u32 s20, s8, 0x50
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s21
+; GFX7-HSA-NEXT: s_addc_u32 s21, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s20
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s22
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s23
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s21
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX7-HSA-NEXT: s_nop 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s14
-; GFX7-HSA-NEXT: s_add_u32 s14, s8, 0xa0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s15
-; GFX7-HSA-NEXT: s_addc_u32 s15, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s14
; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s16
+; GFX7-HSA-NEXT: s_add_u32 s16, s8, 64
; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s17
+; GFX7-HSA-NEXT: s_addc_u32 s17, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s16
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s18
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s19
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s17
+; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX7-HSA-NEXT: s_nop 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s14
+; GFX7-HSA-NEXT: s_add_u32 s14, s8, 48
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s15
+; GFX7-HSA-NEXT: s_addc_u32 s15, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s14
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s41
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s33
; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s15
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX7-HSA-NEXT: s_nop 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s10
+; GFX7-HSA-NEXT: s_add_u32 s10, s8, 32
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s11
+; GFX7-HSA-NEXT: s_addc_u32 s11, s9, 0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s10
; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s12
-; GFX7-HSA-NEXT: s_add_u32 s12, s8, 0x70
; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s13
-; GFX7-HSA-NEXT: s_addc_u32 s13, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s12
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s44
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s41
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s13
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s11
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX7-HSA-NEXT: s_nop 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s6
-; GFX7-HSA-NEXT: s_add_u32 s6, s8, 0x60
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s7
-; GFX7-HSA-NEXT: s_addc_u32 s7, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s10
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s11
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s7
-; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT: s_nop 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-HSA-NEXT: s_add_u32 s4, s8, 48
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s4
+; GFX7-HSA-NEXT: s_add_u32 s4, s8, 16
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s5
; GFX7-HSA-NEXT: s_addc_u32 s5, s9, 0
; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s4
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s37
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s33
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s6
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s7
; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s5
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX7-HSA-NEXT: s_nop 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-HSA-NEXT: s_add_u32 s0, s8, 32
-; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-HSA-NEXT: s_addc_u32 s1, s9, 0
-; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s1
-; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v4, s8
+; GFX7-HSA-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-HSA-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-HSA-NEXT: v_mov_b32_e32 v2, s2
+; GFX7-HSA-NEXT: v_mov_b32_e32 v3, s3
+; GFX7-HSA-NEXT: v_mov_b32_e32 v5, s9
; GFX7-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX7-HSA-NEXT: s_endpgm
;
@@ -8583,175 +8580,140 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
; GFX8-NOHSA-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NOHSA-NEXT: s_load_dwordx8 s[0:7], s[10:11], 0x0
; GFX8-NOHSA-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NOHSA-NEXT: s_lshr_b32 s46, s6, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s48, s6, 24
-; GFX8-NOHSA-NEXT: s_lshr_b32 s50, s6, 8
-; GFX8-NOHSA-NEXT: s_lshr_b32 s52, s4, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s54, s4, 24
-; GFX8-NOHSA-NEXT: s_lshr_b32 s56, s4, 8
-; GFX8-NOHSA-NEXT: s_lshr_b32 s58, s2, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s60, s2, 24
-; GFX8-NOHSA-NEXT: s_lshr_b32 s40, s2, 8
-; GFX8-NOHSA-NEXT: s_lshr_b32 s36, s0, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s34, s0, 24
-; GFX8-NOHSA-NEXT: s_lshr_b32 s28, s0, 8
-; GFX8-NOHSA-NEXT: s_lshr_b32 s62, s7, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s24, s7, 8
-; GFX8-NOHSA-NEXT: s_mov_b32 s22, s7
-; GFX8-NOHSA-NEXT: s_lshr_b32 s64, s5, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s20, s5, 8
-; GFX8-NOHSA-NEXT: s_mov_b32 s18, s5
-; GFX8-NOHSA-NEXT: s_lshr_b32 s66, s3, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s16, s3, 8
-; GFX8-NOHSA-NEXT: s_mov_b32 s14, s3
-; GFX8-NOHSA-NEXT: s_lshr_b32 s44, s1, 16
-; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s1, 8
-; GFX8-NOHSA-NEXT: s_mov_b32 s10, s1
-; GFX8-NOHSA-NEXT: s_ashr_i32 s63, s5, 24
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[26:27], s[0:1], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[30:31], s[2:3], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[38:39], s[4:5], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[68:69], s[6:7], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GFX8-NOHSA-NEXT: s_lshr_b32 s50, s7, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s52, s7, 8
+; GFX8-NOHSA-NEXT: s_mov_b32 s54, s7
+; GFX8-NOHSA-NEXT: s_lshr_b32 s56, s6, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s58, s6, 24
+; GFX8-NOHSA-NEXT: s_lshr_b32 s60, s6, 8
+; GFX8-NOHSA-NEXT: s_lshr_b32 s62, s5, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s48, s5, 8
+; GFX8-NOHSA-NEXT: s_mov_b32 s46, s5
+; GFX8-NOHSA-NEXT: s_lshr_b32 s42, s4, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s40, s4, 24
+; GFX8-NOHSA-NEXT: s_lshr_b32 s38, s4, 8
+; GFX8-NOHSA-NEXT: s_lshr_b32 s64, s3, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s34, s3, 8
+; GFX8-NOHSA-NEXT: s_mov_b32 s30, s3
+; GFX8-NOHSA-NEXT: s_lshr_b32 s28, s2, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s26, s2, 24
+; GFX8-NOHSA-NEXT: s_lshr_b32 s24, s2, 8
+; GFX8-NOHSA-NEXT: s_lshr_b32 s66, s1, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s20, s1, 8
+; GFX8-NOHSA-NEXT: s_mov_b32 s18, s1
+; GFX8-NOHSA-NEXT: s_lshr_b32 s16, s0, 16
+; GFX8-NOHSA-NEXT: s_lshr_b32 s14, s0, 24
+; GFX8-NOHSA-NEXT: s_lshr_b32 s12, s0, 8
+; GFX8-NOHSA-NEXT: s_ashr_i32 s65, s3, 24
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[10:11], s[0:1], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[22:23], s[2:3], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[36:37], s[4:5], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[44:45], s[6:7], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GFX8-NOHSA-NEXT: s_ashr_i32 s33, s1, 31
-; GFX8-NOHSA-NEXT: s_ashr_i32 s42, s1, 24
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[0:1], s[44:45], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GFX8-NOHSA-NEXT: s_ashr_i32 s43, s3, 31
-; GFX8-NOHSA-NEXT: s_ashr_i32 s44, s3, 24
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[2:3], s[66:67], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GFX8-NOHSA-NEXT: s_ashr_i32 s45, s5, 31
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[4:5], s[64:65], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GFX8-NOHSA-NEXT: s_ashr_i32 s4, s1, 31
+; GFX8-NOHSA-NEXT: s_ashr_i32 s6, s1, 24
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[0:1], s[66:67], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GFX8-NOHSA-NEXT: s_ashr_i32 s64, s7, 31
-; GFX8-NOHSA-NEXT: s_ashr_i32 s65, s7, 24
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[6:7], s[62:63], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000
+; GFX8-NOHSA-NEXT: s_ashr_i32 s33, s3, 31
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[2:3], s[64:65], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000
+; GFX8-NOHSA-NEXT: s_ashr_i32 s64, s5, 31
+; GFX8-NOHSA-NEXT: s_ashr_i32 s5, s5, 24
+; GFX8-NOHSA-NEXT: s_bfe_i64 s[62:63], s[62:63], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[60:61], s[60:61], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[58:59], s[58:59], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000
; GFX8-NOHSA-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000
+; GFX8-NOHSA-NEXT: s_ashr_i32 s66, s7, 31
+; GFX8-NOHSA-NEXT: s_ashr_i32 s7, s7, 24
; GFX8-NOHSA-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000
-; GFX8-NOHSA-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s46
-; GFX8-NOHSA-NEXT: s_add_u32 s46, s8, 0xd0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s47
-; GFX8-NOHSA-NEXT: s_addc_u32 s47, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s46
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s48
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s49
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s47
-; GFX8-NOHSA-NEXT: s_add_u32 s46, s8, 0xc0
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_addc_u32 s47, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s46
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s68
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s69
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s50
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s51
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s47
-; GFX8-NOHSA-NEXT: s_add_u32 s46, s8, 0x90
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s50
+; GFX8-NOHSA-NEXT: s_add_u32 s50, s8, 0xf0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s51
+; GFX8-NOHSA-NEXT: s_addc_u32 s51, s9, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s50
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s7
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s66
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s51
+; GFX8-NOHSA-NEXT: s_add_u32 s50, s8, 0xe0
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: s_addc_u32 s51, s9, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s50
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s54
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s55
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s52
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s53
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s51
+; GFX8-NOHSA-NEXT: s_add_u32 s50, s8, 0xd0
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_addc_u32 s47, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s46
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s52
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s53
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s54
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s55
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s47
+; GFX8-NOHSA-NEXT: s_addc_u32 s51, s9, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s50
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s56
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s57
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s58
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s59
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s51
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: s_nop 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s38
-; GFX8-NOHSA-NEXT: s_add_u32 s38, s8, 0x80
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s39
-; GFX8-NOHSA-NEXT: s_addc_u32 s39, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s38
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s56
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s57
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s39
-; GFX8-NOHSA-NEXT: s_add_u32 s38, s8, 0x50
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_addc_u32 s39, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s38
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s58
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s59
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s44
+; GFX8-NOHSA-NEXT: s_add_u32 s44, s8, 0xc0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s45
+; GFX8-NOHSA-NEXT: s_addc_u32 s45, s9, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s44
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s60
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s61
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s39
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s45
+; GFX8-NOHSA-NEXT: s_add_u32 s44, s8, 0xb0
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: s_addc_u32 s45, s9, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s44
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s62
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s63
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s5
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s64
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s45
+; GFX8-NOHSA-NEXT: s_add_u32 s44, s8, 0xa0
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: s_addc_u32 s45, s9, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s44
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s46
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s47
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s48
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s49
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s45
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: s_nop 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s30
-; GFX8-NOHSA-NEXT: s_add_u32 s30, s8, 64
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s31
-; GFX8-NOHSA-NEXT: s_addc_u32 s31, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s30
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s40
+; GFX8-NOHSA-NEXT: s_add_u32 s40, s8, 0x90
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s41
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s31
-; GFX8-NOHSA-NEXT: s_add_u32 s30, s8, 16
+; GFX8-NOHSA-NEXT: s_addc_u32 s41, s9, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s40
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s42
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s43
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s41
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_addc_u32 s31, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s30
+; GFX8-NOHSA-NEXT: s_nop 0
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s36
+; GFX8-NOHSA-NEXT: s_add_u32 s36, s8, 0x80
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s37
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s34
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s35
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s31
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s8
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s26
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s27
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s28
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s29
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s9
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_nop 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s6
-; GFX8-NOHSA-NEXT: s_add_u32 s6, s8, 0xf0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NOHSA-NEXT: s_addc_u32 s7, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s65
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s64
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7
-; GFX8-NOHSA-NEXT: s_add_u32 s6, s8, 0xe0
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_addc_u32 s7, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s6
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s22
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s23
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s24
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s25
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s7
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_nop 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NOHSA-NEXT: s_add_u32 s4, s8, 0xb0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NOHSA-NEXT: s_addc_u32 s5, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s4
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s63
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s45
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s5
-; GFX8-NOHSA-NEXT: s_add_u32 s4, s8, 0xa0
-; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NOHSA-NEXT: s_addc_u32 s5, s9, 0
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s4
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s18
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s19
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s20
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s21
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s5
+; GFX8-NOHSA-NEXT: s_addc_u32 s37, s9, 0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s36
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s38
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s39
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s37
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: s_nop 0
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s2
@@ -8761,15 +8723,33 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2
; GFX8-NOHSA-NEXT: s_add_u32 s2, s8, 0x60
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s44
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s43
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s65
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s33
; GFX8-NOHSA-NEXT: s_addc_u32 s3, s9, 0
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s14
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s15
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s16
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s17
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2
+; GFX8-NOHSA-NEXT: s_add_u32 s2, s8, 0x50
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s30
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s31
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s34
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s35
+; GFX8-NOHSA-NEXT: s_addc_u32 s3, s9, 0
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2
+; GFX8-NOHSA-NEXT: s_add_u32 s2, s8, 64
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s28
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s29
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s26
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s27
+; GFX8-NOHSA-NEXT: s_addc_u32 s3, s9, 0
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s3
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s22
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s23
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s24
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s25
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s2
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: s_nop 0
@@ -8780,16 +8760,32 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0
; GFX8-NOHSA-NEXT: s_add_u32 s0, s8, 32
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s42
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s33
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s6
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s4
+; GFX8-NOHSA-NEXT: s_addc_u32 s1, s9, 0
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NOHSA-NEXT: s_add_u32 s0, s8, 16
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s18
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s19
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s21
; GFX8-NOHSA-NEXT: s_addc_u32 s1, s9, 0
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s16
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s17
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s14
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s15
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s8
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v0, s10
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v1, s11
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v2, s12
; GFX8-NOHSA-NEXT: v_mov_b32_e32 v3, s13
-; GFX8-NOHSA-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NOHSA-NEXT: v_mov_b32_e32 v5, s9
; GFX8-NOHSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NOHSA-NEXT: s_endpgm
;
@@ -8988,120 +8984,122 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_load_b256 s[0:7], s[10:11], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_lshr_b32 s34, s6, 16
-; GFX12-NEXT: s_lshr_b32 s36, s6, 24
-; GFX12-NEXT: s_lshr_b32 s38, s6, 8
-; GFX12-NEXT: s_lshr_b32 s40, s4, 16
-; GFX12-NEXT: s_lshr_b32 s42, s4, 24
-; GFX12-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000
-; GFX12-NEXT: s_lshr_b32 s44, s4, 8
+; GFX12-NEXT: s_lshr_b32 s40, s7, 16
+; GFX12-NEXT: s_lshr_b32 s50, s6, 8
+; GFX12-NEXT: s_lshr_b32 s62, s3, 16
+; GFX12-NEXT: s_ashr_i32 s51, s3, 24
+; GFX12-NEXT: s_lshr_b32 s42, s7, 8
+; GFX12-NEXT: s_mov_b32 s44, s7
+; GFX12-NEXT: s_lshr_b32 s46, s6, 16
+; GFX12-NEXT: s_lshr_b32 s48, s6, 24
+; GFX12-NEXT: s_lshr_b32 s38, s5, 16
+; GFX12-NEXT: s_lshr_b32 s52, s5, 8
+; GFX12-NEXT: s_mov_b32 s54, s5
+; GFX12-NEXT: s_lshr_b32 s56, s4, 16
+; GFX12-NEXT: s_lshr_b32 s58, s4, 24
+; GFX12-NEXT: s_lshr_b32 s60, s4, 8
+; GFX12-NEXT: s_lshr_b32 s36, s3, 8
+; GFX12-NEXT: s_mov_b32 s34, s3
+; GFX12-NEXT: s_lshr_b32 s28, s2, 16
+; GFX12-NEXT: s_lshr_b32 s26, s2, 24
+; GFX12-NEXT: s_lshr_b32 s24, s2, 8
+; GFX12-NEXT: s_bfe_i64 s[20:21], s[2:3], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[30:31], s[4:5], 0x80000
; GFX12-NEXT: s_bfe_i64 s[66:67], s[6:7], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v1, s35
-; GFX12-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000
+; GFX12-NEXT: s_ashr_i32 s39, s3, 31
+; GFX12-NEXT: s_bfe_i64 s[2:3], s[62:63], 0x80000
+; GFX12-NEXT: s_ashr_i32 s62, s5, 31
+; GFX12-NEXT: s_ashr_i32 s63, s5, 24
+; GFX12-NEXT: s_bfe_i64 s[4:5], s[50:51], 0x80000
+; GFX12-NEXT: s_ashr_i32 s50, s7, 31
; GFX12-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v0, s34 :: v_dual_mov_b32 v3, s37
-; GFX12-NEXT: v_dual_mov_b32 v2, s36 :: v_dual_mov_b32 v5, s67
-; GFX12-NEXT: s_lshr_b32 s28, s2, 16
-; GFX12-NEXT: s_lshr_b32 s46, s2, 24
-; GFX12-NEXT: s_bfe_i64 s[64:65], s[4:5], 0x80000
+; GFX12-NEXT: s_ashr_i32 s7, s7, 24
; GFX12-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v4, s66 :: v_dual_mov_b32 v7, s39
-; GFX12-NEXT: v_dual_mov_b32 v6, s38 :: v_dual_mov_b32 v9, s41
-; GFX12-NEXT: s_lshr_b32 s48, s2, 8
-; GFX12-NEXT: v_dual_mov_b32 v8, s40 :: v_dual_mov_b32 v11, s43
-; GFX12-NEXT: v_dual_mov_b32 v10, s42 :: v_dual_mov_b32 v13, s65
-; GFX12-NEXT: s_lshr_b32 s50, s0, 16
-; GFX12-NEXT: s_lshr_b32 s52, s0, 24
-; GFX12-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v12, s64 :: v_dual_mov_b32 v15, s45
-; GFX12-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
-; GFX12-NEXT: v_mov_b32_e32 v14, s44
-; GFX12-NEXT: s_lshr_b32 s54, s0, 8
-; GFX12-NEXT: s_bfe_i64 s[30:31], s[2:3], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v24, 0 :: v_dual_mov_b32 v1, s41
; GFX12-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000
-; GFX12-NEXT: s_lshr_b32 s56, s7, 16
-; GFX12-NEXT: s_lshr_b32 s58, s5, 16
-; GFX12-NEXT: s_lshr_b32 s60, s1, 8
-; GFX12-NEXT: s_mov_b32 s62, s1
-; GFX12-NEXT: s_ashr_i32 s57, s1, 24
-; GFX12-NEXT: s_ashr_i32 s59, s3, 31
-; GFX12-NEXT: s_ashr_i32 s61, s3, 24
-; GFX12-NEXT: s_ashr_i32 s63, s5, 31
+; GFX12-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v3, s50
+; GFX12-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v5, s45
+; GFX12-NEXT: v_dual_mov_b32 v4, s44 :: v_dual_mov_b32 v7, s43
+; GFX12-NEXT: v_dual_mov_b32 v6, s42 :: v_dual_mov_b32 v9, s47
+; GFX12-NEXT: v_dual_mov_b32 v8, s46 :: v_dual_mov_b32 v11, s49
+; GFX12-NEXT: v_dual_mov_b32 v10, s48 :: v_dual_mov_b32 v13, s67
+; GFX12-NEXT: v_dual_mov_b32 v12, s66 :: v_dual_mov_b32 v15, s5
+; GFX12-NEXT: v_mov_b32_e32 v14, s4
+; GFX12-NEXT: s_bfe_i64 s[4:5], s[38:39], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000
; GFX12-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[58:59], s[58:59], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x80000
; GFX12-NEXT: s_clause 0x3
-; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:208
-; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:192
+; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:240
+; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:224
+; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:208
+; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9] offset:192
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s62
+; GFX12-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s63
+; GFX12-NEXT: v_mov_b32_e32 v5, s55
+; GFX12-NEXT: s_bfe_i64 s[60:61], s[60:61], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v4, s54 :: v_dual_mov_b32 v7, s53
+; GFX12-NEXT: v_dual_mov_b32 v6, s52 :: v_dual_mov_b32 v9, s57
+; GFX12-NEXT: v_dual_mov_b32 v8, s56 :: v_dual_mov_b32 v11, s59
+; GFX12-NEXT: v_dual_mov_b32 v10, s58 :: v_dual_mov_b32 v13, s31
+; GFX12-NEXT: s_lshr_b32 s22, s1, 16
+; GFX12-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v12, s30 :: v_dual_mov_b32 v15, s61
+; GFX12-NEXT: v_dual_mov_b32 v14, s60 :: v_dual_mov_b32 v17, s3
+; GFX12-NEXT: s_lshr_b32 s16, s1, 8
+; GFX12-NEXT: s_mov_b32 s18, s1
+; GFX12-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v19, s39
+; GFX12-NEXT: v_dual_mov_b32 v18, s51 :: v_dual_mov_b32 v21, s35
+; GFX12-NEXT: s_lshr_b32 s14, s0, 16
+; GFX12-NEXT: s_lshr_b32 s12, s0, 24
+; GFX12-NEXT: s_ashr_i32 s6, s1, 31
+; GFX12-NEXT: s_ashr_i32 s33, s1, 24
+; GFX12-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v20, s34 :: v_dual_mov_b32 v23, s37
+; GFX12-NEXT: v_mov_b32_e32 v22, s36
+; GFX12-NEXT: s_clause 0x5
+; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:176
+; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:160
; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:144
; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9] offset:128
-; GFX12-NEXT: v_dual_mov_b32 v0, s28 :: v_dual_mov_b32 v3, s47
-; GFX12-NEXT: v_dual_mov_b32 v1, s29 :: v_dual_mov_b32 v2, s46
-; GFX12-NEXT: v_mov_b32_e32 v5, s31
-; GFX12-NEXT: s_lshr_b32 s26, s7, 8
-; GFX12-NEXT: s_mov_b32 s24, s7
-; GFX12-NEXT: s_bfe_i64 s[22:23], s[0:1], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v4, s30 :: v_dual_mov_b32 v7, s49
-; GFX12-NEXT: v_dual_mov_b32 v6, s48 :: v_dual_mov_b32 v9, s51
-; GFX12-NEXT: s_lshr_b32 s18, s5, 8
-; GFX12-NEXT: s_mov_b32 s20, s5
-; GFX12-NEXT: s_lshr_b32 s16, s3, 16
-; GFX12-NEXT: s_lshr_b32 s12, s3, 8
-; GFX12-NEXT: s_mov_b32 s14, s3
-; GFX12-NEXT: s_lshr_b32 s10, s1, 16
-; GFX12-NEXT: s_ashr_i32 s33, s1, 31
-; GFX12-NEXT: s_bfe_i64 s[2:3], s[62:63], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[0:1], s[60:61], 0x80000
-; GFX12-NEXT: s_ashr_i32 s60, s5, 24
-; GFX12-NEXT: s_bfe_i64 s[4:5], s[58:59], 0x80000
-; GFX12-NEXT: s_ashr_i32 s58, s7, 31
-; GFX12-NEXT: s_ashr_i32 s62, s7, 24
-; GFX12-NEXT: s_bfe_i64 s[6:7], s[56:57], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v8, s50 :: v_dual_mov_b32 v11, s53
-; GFX12-NEXT: v_dual_mov_b32 v10, s52 :: v_dual_mov_b32 v13, s23
-; GFX12-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v15, s55
-; GFX12-NEXT: v_dual_mov_b32 v14, s54 :: v_dual_mov_b32 v17, s7
-; GFX12-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
+; GFX12-NEXT: global_store_b128 v24, v[16:19], s[8:9] offset:112
+; GFX12-NEXT: global_store_b128 v24, v[20:23], s[8:9] offset:96
+; GFX12-NEXT: v_dual_mov_b32 v0, s28 :: v_dual_mov_b32 v3, s27
+; GFX12-NEXT: v_dual_mov_b32 v1, s29 :: v_dual_mov_b32 v2, s26
+; GFX12-NEXT: v_mov_b32_e32 v5, s21
+; GFX12-NEXT: s_lshr_b32 s64, s0, 8
; GFX12-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v16, s6 :: v_dual_mov_b32 v19, s58
-; GFX12-NEXT: v_dual_mov_b32 v18, s62 :: v_dual_mov_b32 v21, s25
; GFX12-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v20, s24 :: v_dual_mov_b32 v23, s27
-; GFX12-NEXT: v_mov_b32_e32 v22, s26
-; GFX12-NEXT: s_clause 0x5
-; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:80
-; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:64
-; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:16
-; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9]
-; GFX12-NEXT: global_store_b128 v24, v[16:19], s[8:9] offset:240
-; GFX12-NEXT: global_store_b128 v24, v[20:23], s[8:9] offset:224
-; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s63
-; GFX12-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s60
-; GFX12-NEXT: v_mov_b32_e32 v5, s21
-; GFX12-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v4, s20 :: v_dual_mov_b32 v7, s25
+; GFX12-NEXT: v_dual_mov_b32 v6, s24 :: v_dual_mov_b32 v9, s23
; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v4, s20 :: v_dual_mov_b32 v7, s19
-; GFX12-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s17
-; GFX12-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v8, s16 :: v_dual_mov_b32 v11, s59
-; GFX12-NEXT: v_dual_mov_b32 v10, s61 :: v_dual_mov_b32 v13, s15
-; GFX12-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v15, s13
-; GFX12-NEXT: v_dual_mov_b32 v14, s12 :: v_dual_mov_b32 v17, s11
-; GFX12-NEXT: v_dual_mov_b32 v16, s10 :: v_dual_mov_b32 v19, s33
-; GFX12-NEXT: v_dual_mov_b32 v18, s57 :: v_dual_mov_b32 v21, s3
-; GFX12-NEXT: v_dual_mov_b32 v20, s2 :: v_dual_mov_b32 v23, s1
+; GFX12-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v11, s6
+; GFX12-NEXT: v_dual_mov_b32 v10, s33 :: v_dual_mov_b32 v13, s19
+; GFX12-NEXT: s_bfe_i64 s[10:11], s[0:1], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[0:1], s[64:65], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v12, s18 :: v_dual_mov_b32 v15, s17
+; GFX12-NEXT: v_dual_mov_b32 v14, s16 :: v_dual_mov_b32 v17, s15
+; GFX12-NEXT: v_dual_mov_b32 v16, s14 :: v_dual_mov_b32 v19, s13
+; GFX12-NEXT: v_dual_mov_b32 v18, s12 :: v_dual_mov_b32 v21, s11
+; GFX12-NEXT: v_dual_mov_b32 v20, s10 :: v_dual_mov_b32 v23, s1
; GFX12-NEXT: v_mov_b32_e32 v22, s0
; GFX12-NEXT: s_clause 0x5
-; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:176
-; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:160
-; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:112
-; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9] offset:96
-; GFX12-NEXT: global_store_b128 v24, v[16:19], s[8:9] offset:48
-; GFX12-NEXT: global_store_b128 v24, v[20:23], s[8:9] offset:32
+; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:80
+; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:64
+; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:48
+; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9] offset:32
+; GFX12-NEXT: global_store_b128 v24, v[16:19], s[8:9] offset:16
+; GFX12-NEXT: global_store_b128 v24, v[20:23], s[8:9]
; GFX12-NEXT: s_endpgm
%load = load <32 x i8>, ptr addrspace(4) %in
%ext = sext <32 x i8> %load to <32 x i64>
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
index bca39d06e941c..00021673c49a7 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -6362,29 +6362,46 @@ define amdgpu_kernel void @global_sextload_v8i16_to_v8i64(ptr addrspace(1) %out,
; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4
; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5
; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, v3
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v6, 16, v2
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v7, 16, v0
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v4, v2, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v11, 31, v1
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v10, 16, v1
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v8, v1, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v15, 31, v3
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 16, v3
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v12, v5, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v5, 31, v4
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v2, v7, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v6, v6, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v3, 31, v2
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v7, 31, v6
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:16
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:32
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s7, v3
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s6, v2
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s8, s7
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s10, s5
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s6, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s4, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[4:5], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[6:7], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s13, s5, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s15, s5, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[10:11], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s20, s7, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s21, s7, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[8:9], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[14:15], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[12:13], 0x100000
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s6
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s7
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s21
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s20
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s15
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s13
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s18
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s19
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s17
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s10
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s11
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s8
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s9
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
; GCN-NOHSA-SI-NEXT: s_endpgm
;
; GCN-HSA-LABEL: global_sextload_v8i16_to_v8i64:
@@ -6397,44 +6414,59 @@ define amdgpu_kernel void @global_sextload_v8i16_to_v8i64(ptr addrspace(1) %out,
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
; GCN-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 48
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v19, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v18, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 32
; GCN-HSA-NEXT: v_mov_b32_e32 v17, s1
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v16, s0
-; GCN-HSA-NEXT: s_add_u32 s0, s0, 16
-; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v23, s1
-; GCN-HSA-NEXT: v_mov_b32_e32 v21, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v22, s0
-; GCN-HSA-NEXT: v_mov_b32_e32 v20, s2
; GCN-HSA-NEXT: s_waitcnt vmcnt(0)
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, v3
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v9, 16, v2
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GCN-HSA-NEXT: v_bfe_i32 v4, v1, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v1
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v6, 16, v1
-; GCN-HSA-NEXT: v_bfe_i32 v12, v2, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v2, v5, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4
-; GCN-HSA-NEXT: v_bfe_i32 v14, v9, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v8, v8, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v3
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 16, v3
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v2
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v14
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[4:7]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[12:15]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[0:3]
+; GCN-HSA-NEXT: v_readfirstlane_b32 s2, v0
+; GCN-HSA-NEXT: v_readfirstlane_b32 s3, v1
+; GCN-HSA-NEXT: v_readfirstlane_b32 s4, v2
+; GCN-HSA-NEXT: v_readfirstlane_b32 s5, v3
+; GCN-HSA-NEXT: s_mov_b32 s6, s5
+; GCN-HSA-NEXT: s_mov_b32 s8, s3
+; GCN-HSA-NEXT: s_lshr_b32 s10, s4, 16
+; GCN-HSA-NEXT: s_lshr_b32 s12, s2, 16
+; GCN-HSA-NEXT: s_ashr_i32 s11, s3, 31
+; GCN-HSA-NEXT: s_ashr_i32 s13, s3, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[2:3], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[4:5], 0x100000
+; GCN-HSA-NEXT: s_ashr_i32 s18, s5, 31
+; GCN-HSA-NEXT: s_ashr_i32 s19, s5, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[12:13], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[10:11], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x100000
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s6
+; GCN-HSA-NEXT: s_add_u32 s6, s0, 48
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s7
+; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v19, s7
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s19
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s18
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s2
+; GCN-HSA-NEXT: v_mov_b32_e32 v18, s6
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 16
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s3
+; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[0:3]
+; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s13
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s11
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT: s_add_u32 s0, s0, 32
+; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s0
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s16
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s17
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s4
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s5
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s1
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s14
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s15
+; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[12:15]
; GCN-HSA-NEXT: s_endpgm
;
; GCN-NOHSA-VI-LABEL: global_sextload_v8i16_to_v8i64:
@@ -6980,149 +7012,212 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4
; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5
; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(1)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, v3
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v13, 16, v0
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s6, v2
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s7, v3
; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, v7
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v17, 16, v6
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v18, 16, v4
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v11, 31, v5
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v10, 16, v5
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v8, v5, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v15, 31, v7
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 16, v7
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v16, v6, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v12, v12, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v20, v0, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v6, v18, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v18, v17, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v13, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v9, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v3
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v3
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:112
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v27, 31, v1
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s8, v4
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s9, v5
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s11, v7
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s10, v6
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s12, s7
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s14, s5
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s16, s11
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s18, s9
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s6, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s4, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s10, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s8, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[8:9], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[10:11], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[4:5], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[6:7], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s21, s9, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s23, s9, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[18:19], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s25, s11, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s27, s11, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[16:17], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s5, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s38, s5, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[14:15], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s39, s7, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s40, s7, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[12:13], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[26:27], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[24:25], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[22:23], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[20:21], 0x100000
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s6
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s7
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s40
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s39
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 16, v1
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v24, v1, 0, 16
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v2, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v2, v1, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v5, 31, v4
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v17, 31, v16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v7, 31, v6
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v19, 31, v18
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v3, 31, v2
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:80
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s38
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s33
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s10
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s11
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s27
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s25
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s23
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s21
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s36
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s37
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s34
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s35
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s30
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s31
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s28
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s29
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s18
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s19
; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:32
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s17
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s14
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s15
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s12
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s13
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0
; GCN-NOHSA-SI-NEXT: s_endpgm
;
; GCN-HSA-LABEL: global_sextload_v16i16_to_v16i64:
; GCN-HSA: ; %bb.0:
; GCN-HSA-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GCN-HSA-NEXT: s_add_i32 s12, s12, s17
-; GCN-HSA-NEXT: s_mov_b32 flat_scratch_lo, s13
; GCN-HSA-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; GCN-HSA-NEXT: s_mov_b32 flat_scratch_lo, s13
; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0)
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT: flat_load_dwordx4 v[4:7], v[0:1]
; GCN-HSA-NEXT: s_add_u32 s2, s2, 16
; GCN-HSA-NEXT: s_addc_u32 s3, s3, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
; GCN-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 48
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 32
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v17, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v16, s2
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
+; GCN-HSA-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; GCN-HSA-NEXT: v_mov_b32_e32 v19, s1
+; GCN-HSA-NEXT: v_mov_b32_e32 v18, s0
+; GCN-HSA-NEXT: s_waitcnt vmcnt(1)
+; GCN-HSA-NEXT: v_readfirstlane_b32 s2, v0
+; GCN-HSA-NEXT: v_readfirstlane_b32 s3, v1
+; GCN-HSA-NEXT: v_readfirstlane_b32 s4, v2
+; GCN-HSA-NEXT: v_readfirstlane_b32 s5, v3
+; GCN-HSA-NEXT: s_mov_b32 s6, s5
+; GCN-HSA-NEXT: s_mov_b32 s8, s3
+; GCN-HSA-NEXT: s_lshr_b32 s10, s4, 16
+; GCN-HSA-NEXT: s_lshr_b32 s12, s2, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[2:3], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[4:5], 0x100000
+; GCN-HSA-NEXT: s_ashr_i32 s18, s3, 31
+; GCN-HSA-NEXT: s_ashr_i32 s19, s3, 16
+; GCN-HSA-NEXT: s_ashr_i32 s20, s5, 31
+; GCN-HSA-NEXT: s_ashr_i32 s21, s5, 16
+; GCN-HSA-NEXT: s_waitcnt vmcnt(0)
+; GCN-HSA-NEXT: v_readfirstlane_b32 s2, v4
+; GCN-HSA-NEXT: v_readfirstlane_b32 s3, v5
+; GCN-HSA-NEXT: v_readfirstlane_b32 s4, v6
+; GCN-HSA-NEXT: v_readfirstlane_b32 s5, v7
+; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x100000
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s21
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s20
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s19
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s18
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s16
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s17
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s14
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s15
+; GCN-HSA-NEXT: s_mov_b32 s14, s5
+; GCN-HSA-NEXT: s_mov_b32 s16, s3
+; GCN-HSA-NEXT: s_lshr_b32 s18, s4, 16
+; GCN-HSA-NEXT: s_lshr_b32 s20, s2, 16
+; GCN-HSA-NEXT: s_ashr_i32 s15, s3, 31
+; GCN-HSA-NEXT: s_ashr_i32 s17, s3, 16
+; GCN-HSA-NEXT: s_ashr_i32 s19, s5, 31
+; GCN-HSA-NEXT: s_ashr_i32 s21, s5, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[2:3], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[4:5], 0x100000
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s6
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s7
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9
+; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[20:21], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[18:19], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[16:17], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[14:15], 0x100000
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s10
+; GCN-HSA-NEXT: s_add_u32 s10, s0, 48
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s11
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s12
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s13
+; GCN-HSA-NEXT: s_addc_u32 s11, s1, 0
+; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[12:15]
+; GCN-HSA-NEXT: v_mov_b32_e32 v16, s21
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s11
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s10
+; GCN-HSA-NEXT: flat_store_dwordx4 v[12:13], v[0:3]
+; GCN-HSA-NEXT: v_mov_b32_e32 v17, s19
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s2
; GCN-HSA-NEXT: s_add_u32 s2, s0, 16
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s3
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v19, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v18, s2
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s2
; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x70
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v21, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v20, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v23, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v22, s2
+; GCN-HSA-NEXT: flat_store_dwordx4 v[12:13], v[4:7]
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s8
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x50
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, s1
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s9
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v12, s0
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[14:17]
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 32
+; GCN-HSA-NEXT: v_mov_b32_e32 v20, s17
+; GCN-HSA-NEXT: v_mov_b32_e32 v21, s15
+; GCN-HSA-NEXT: v_mov_b32_e32 v18, s6
+; GCN-HSA-NEXT: v_mov_b32_e32 v19, s7
+; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[18:21]
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60
+; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[8:11]
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
; GCN-HSA-NEXT: s_add_u32 s0, s0, 64
-; GCN-HSA-NEXT: v_mov_b32_e32 v25, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v22, s24
+; GCN-HSA-NEXT: v_mov_b32_e32 v23, s25
+; GCN-HSA-NEXT: v_mov_b32_e32 v24, s4
+; GCN-HSA-NEXT: v_mov_b32_e32 v25, s5
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v24, s2
-; GCN-HSA-NEXT: v_mov_b32_e32 v27, s1
-; GCN-HSA-NEXT: v_mov_b32_e32 v26, s0
-; GCN-HSA-NEXT: s_waitcnt vmcnt(1)
-; GCN-HSA-NEXT: v_bfe_i32 v8, v5, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v5
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 16, v5
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, v7
-; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11]
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v18, 16, v6
-; GCN-HSA-NEXT: v_bfe_i32 v8, v5, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v7
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 16, v7
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v19, 16, v4
-; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[8:11]
-; GCN-HSA-NEXT: v_bfe_i32 v7, v6, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v9, v18, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v4, v4, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v8, 31, v7
-; GCN-HSA-NEXT: v_bfe_i32 v6, v19, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 31, v9
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4
-; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[7:10]
-; GCN-HSA-NEXT: s_waitcnt vmcnt(3)
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, v3
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v6
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v14, 16, v2
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v18, 16, v0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[12:13], v[4:7]
-; GCN-HSA-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v4, v1, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v1
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v6, 16, v1
-; GCN-HSA-NEXT: v_bfe_i32 v12, v2, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v2, v18, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v5, 31, v4
-; GCN-HSA-NEXT: v_bfe_i32 v14, v14, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v8, v15, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v3
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 16, v3
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v2
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v14
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[4:7]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[8:11]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[12:15]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[0:3]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[22:25]
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s22
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s23
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_endpgm
;
; GCN-NOHSA-VI-LABEL: global_sextload_v16i16_to_v16i64:
@@ -8111,299 +8206,420 @@ define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(ptr addrspace(1) %ou
; GCN-NOHSA-SI-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NOHSA-SI-NEXT: s_mov_b32 s8, s6
; GCN-NOHSA-SI-NEXT: s_mov_b32 s9, s7
-; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[12:15], off, s[8:11], 0 offset:48
-; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:32
+; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:48
+; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:32
; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4
; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5
; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[8:11], off, s[8:11], 0
-; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
+; GCN-NOHSA-SI-NEXT: buffer_load_dwordx4 v[12:15], off, s[8:11], 0 offset:16
; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(3)
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v20, 16, v14
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v23, v15
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v21, 16, v12
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s10, v0
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s11, v1
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s12, v2
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s13, v3
; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(2)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v27, v3
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v16, 16, v0
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s16, v4
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s17, v5
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s14, v6
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s15, v7
; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v17, 16, v6
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v22, v7
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v18, 16, v4
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v19, 16, v10
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v23, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v15
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v15
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:240
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v13
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v13
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v13, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:208
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, v11
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s18, v12
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s19, v13
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s20, v14
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s21, v15
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s22, v8
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s23, v9
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s25, v11
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s24, v10
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s26, s13
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s30, s11
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s34, s15
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s36, s17
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s38, s21
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s40, s19
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s42, s25
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s44, s23
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s12, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s46, s10, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s48, s14, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s50, s16, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s52, s20, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s54, s18, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s56, s24, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s58, s22, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[22:23], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[24:25], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[18:19], 0x100000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s23, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s47, s23, 16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s49, s25, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s51, s25, 16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s53, s19, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s55, s19, 16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s57, s21, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s59, s21, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[22:23], s[26:27], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[20:21], 0x100000
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s22
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s23
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s64, s17, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s65, s17, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[20:21], s[30:31], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[16:17], 0x100000
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s20
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s21
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s16, s13, 16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s17, s13, 31
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s16, s11, 16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s17
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s17, s11, 31
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s16
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s66, s15, 31
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s17
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s67, s15, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[14:15], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[60:61], s[10:11], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[62:63], s[12:13], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[58:59], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[56:57], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[54:55], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[52:53], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[20:21], s[50:51], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[22:23], s[48:49], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[46:47], 0x100000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x100000
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:208
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(1)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s34
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s35
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s67
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s66
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v27, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v3
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v3
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:176
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s36
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s37
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s65
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s64
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v26, 31, v1
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 16, v1
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v23, v1, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:144
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v22, 0, 16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s38
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s39
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s59
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s57
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v7
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 16, v7
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:112
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s40
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s41
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s55
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s53
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v5
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 16, v5
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v5, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s42
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s43
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s51
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s49
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v11
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v24, 16, v11
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v13, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v11, v12, 0, 16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s44
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s45
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s47
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s33
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v14, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v13, v21, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v24, v20, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v3, v1, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v1, v8, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:224
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v8, 31, v9
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v7, 16, v9
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v5, v9, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v9, v10, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v12, 31, v11
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 31, v13
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:192
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s62
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s63
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s60
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s61
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s30
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s31
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s26
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s27
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s18
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s19
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v20, s8
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v21, s9
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v24, s6
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v25, s7
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s28
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s29
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v13, v4, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v20, v6, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v11, v19, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v15, v18, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v22, v17, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v26, v16, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v24, v0, 0, 16
-; GCN-NOHSA-SI-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v28, v2, 0, 16
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v30, v0, 0, 16
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v2, 31, v1
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v10, 31, v9
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v14, 31, v13
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v29, 31, v28
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v12, 31, v11
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v16, 31, v15
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v23, 31, v22
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v27, 31, v26
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v31, 31, v30
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[28:31], off, s[0:3], 0 offset:160
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:128
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:96
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:32
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[1:4], off, s[0:3], 0
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[5:8], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s24
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s25
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:192
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s22
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s23
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:160
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s20
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s21
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:128
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s17
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:96
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v22, s14
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v23, s15
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v26, s12
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v27, s13
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s10
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s11
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GCN-NOHSA-SI-NEXT: s_endpgm
;
; GCN-HSA-LABEL: global_sextload_v32i16_to_v32i64:
; GCN-HSA: ; %bb.0:
; GCN-HSA-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GCN-HSA-NEXT: s_add_i32 s12, s12, s17
-; GCN-HSA-NEXT: s_mov_b32 flat_scratch_lo, s13
; GCN-HSA-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; GCN-HSA-NEXT: s_mov_b32 flat_scratch_lo, s13
; GCN-HSA-NEXT: s_waitcnt lgkmcnt(0)
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT: flat_load_dwordx4 v[12:15], v[0:1]
; GCN-HSA-NEXT: s_add_u32 s4, s2, 48
+; GCN-HSA-NEXT: flat_load_dwordx4 v[4:7], v[0:1]
; GCN-HSA-NEXT: s_addc_u32 s5, s3, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5
; GCN-HSA-NEXT: s_add_u32 s4, s2, 32
+; GCN-HSA-NEXT: flat_load_dwordx4 v[14:17], v[0:1]
; GCN-HSA-NEXT: s_addc_u32 s5, s3, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5
; GCN-HSA-NEXT: s_add_u32 s2, s2, 16
+; GCN-HSA-NEXT: flat_load_dwordx4 v[10:13], v[0:1]
; GCN-HSA-NEXT: s_addc_u32 s3, s3, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT: flat_load_dwordx4 v[8:11], v[0:1]
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; GCN-HSA-NEXT: s_waitcnt vmcnt(3)
+; GCN-HSA-NEXT: v_readfirstlane_b32 s7, v5
+; GCN-HSA-NEXT: v_readfirstlane_b32 s8, v6
+; GCN-HSA-NEXT: v_readfirstlane_b32 s9, v7
+; GCN-HSA-NEXT: v_readfirstlane_b32 s6, v4
+; GCN-HSA-NEXT: s_mov_b32 s10, s9
+; GCN-HSA-NEXT: s_mov_b32 s12, s7
+; GCN-HSA-NEXT: s_lshr_b32 s14, s8, 16
+; GCN-HSA-NEXT: s_waitcnt vmcnt(2)
+; GCN-HSA-NEXT: v_readfirstlane_b32 s23, v17
+; GCN-HSA-NEXT: s_lshr_b32 s16, s6, 16
+; GCN-HSA-NEXT: v_readfirstlane_b32 s18, v14
+; GCN-HSA-NEXT: v_readfirstlane_b32 s19, v15
+; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[14:15], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[10:11], 0x100000
+; GCN-HSA-NEXT: s_mov_b32 s24, s23
+; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[6:7], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[8:9], 0x100000
+; GCN-HSA-NEXT: s_ashr_i32 s33, s7, 31
+; GCN-HSA-NEXT: s_ashr_i32 s49, s7, 16
+; GCN-HSA-NEXT: s_ashr_i32 s50, s9, 31
+; GCN-HSA-NEXT: s_ashr_i32 s51, s9, 16
+; GCN-HSA-NEXT: v_readfirstlane_b32 s22, v16
+; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[16:17], 0x100000
+; GCN-HSA-NEXT: s_mov_b32 s26, s19
+; GCN-HSA-NEXT: s_lshr_b32 s16, s18, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[18:19], 0x100000
+; GCN-HSA-NEXT: s_ashr_i32 s52, s19, 31
+; GCN-HSA-NEXT: s_ashr_i32 s53, s19, 16
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s14
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s15
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s12
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s13
+; GCN-HSA-NEXT: s_waitcnt vmcnt(1)
+; GCN-HSA-NEXT: v_readfirstlane_b32 s12, v10
+; GCN-HSA-NEXT: v_readfirstlane_b32 s13, v11
+; GCN-HSA-NEXT: v_readfirstlane_b32 s14, v12
+; GCN-HSA-NEXT: v_readfirstlane_b32 s15, v13
+; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[24:25], 0x100000
+; GCN-HSA-NEXT: s_lshr_b32 s28, s22, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[22:23], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[38:39], s[26:27], 0x100000
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s18
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s19
+; GCN-HSA-NEXT: s_lshr_b32 s22, s12, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[12:13], 0x100000
+; GCN-HSA-NEXT: s_ashr_i32 s58, s15, 31
+; GCN-HSA-NEXT: s_ashr_i32 s59, s15, 16
+; GCN-HSA-NEXT: s_mov_b32 s12, s15
+; GCN-HSA-NEXT: s_lshr_b32 s26, s14, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[30:31], s[14:15], 0x100000
+; GCN-HSA-NEXT: s_waitcnt vmcnt(0)
+; GCN-HSA-NEXT: v_readfirstlane_b32 s14, v0
+; GCN-HSA-NEXT: v_readfirstlane_b32 s15, v1
+; GCN-HSA-NEXT: v_readfirstlane_b32 s36, v2
+; GCN-HSA-NEXT: v_readfirstlane_b32 s37, v3
+; GCN-HSA-NEXT: s_mov_b32 s24, s13
+; GCN-HSA-NEXT: s_bfe_i64 s[34:35], s[26:27], 0x100000
+; GCN-HSA-NEXT: s_mov_b32 s44, s15
+; GCN-HSA-NEXT: s_lshr_b32 s26, s14, 16
+; GCN-HSA-NEXT: s_mov_b32 s46, s37
+; GCN-HSA-NEXT: s_lshr_b32 s48, s36, 16
+; GCN-HSA-NEXT: s_ashr_i32 s54, s23, 31
+; GCN-HSA-NEXT: s_ashr_i32 s55, s23, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x100000
+; GCN-HSA-NEXT: s_ashr_i32 s56, s13, 31
+; GCN-HSA-NEXT: s_ashr_i32 s57, s13, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[40:41], s[24:25], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[42:43], s[12:13], 0x100000
+; GCN-HSA-NEXT: s_ashr_i32 s60, s15, 31
+; GCN-HSA-NEXT: s_ashr_i32 s61, s15, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[14:15], 0x100000
+; GCN-HSA-NEXT: s_ashr_i32 s62, s37, 31
+; GCN-HSA-NEXT: s_ashr_i32 s63, s37, 16
+; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[36:37], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[26:27], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[48:49], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[36:37], s[44:45], 0x100000
+; GCN-HSA-NEXT: s_bfe_i64 s[44:45], s[46:47], 0x100000
+; GCN-HSA-NEXT: s_add_u32 s46, s0, 48
+; GCN-HSA-NEXT: s_addc_u32 s47, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v22, s46
+; GCN-HSA-NEXT: v_mov_b32_e32 v23, s47
+; GCN-HSA-NEXT: s_add_u32 s46, s0, 16
+; GCN-HSA-NEXT: s_addc_u32 s47, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v24, s46
+; GCN-HSA-NEXT: v_mov_b32_e32 v25, s47
+; GCN-HSA-NEXT: s_add_u32 s46, s0, 0xf0
+; GCN-HSA-NEXT: s_addc_u32 s47, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s38
+; GCN-HSA-NEXT: s_add_u32 s38, s0, 0xd0
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s39
+; GCN-HSA-NEXT: s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v28, s38
+; GCN-HSA-NEXT: v_mov_b32_e32 v29, s39
+; GCN-HSA-NEXT: s_add_u32 s38, s0, 0xb0
+; GCN-HSA-NEXT: s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v30, s38
+; GCN-HSA-NEXT: v_mov_b32_e32 v31, s39
+; GCN-HSA-NEXT: s_add_u32 s38, s0, 0x90
+; GCN-HSA-NEXT: s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v32, s38
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s49
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s33
+; GCN-HSA-NEXT: v_mov_b32_e32 v33, s39
+; GCN-HSA-NEXT: s_add_u32 s38, s0, 0x70
+; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[8:11]
+; GCN-HSA-NEXT: s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v24, s38
+; GCN-HSA-NEXT: v_mov_b32_e32 v25, s39
+; GCN-HSA-NEXT: s_add_u32 s38, s0, 0x50
+; GCN-HSA-NEXT: v_mov_b32_e32 v26, s46
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s51
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s50
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s53
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s52
+; GCN-HSA-NEXT: s_addc_u32 s39, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v27, s47
+; GCN-HSA-NEXT: v_mov_b32_e32 v16, s42
+; GCN-HSA-NEXT: v_mov_b32_e32 v17, s43
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s55
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s54
+; GCN-HSA-NEXT: v_mov_b32_e32 v18, s59
+; GCN-HSA-NEXT: v_mov_b32_e32 v19, s58
+; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[4:7]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[28:29], v[0:3]
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s44
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s45
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4
+; GCN-HSA-NEXT: s_add_u32 s4, s0, 32
+; GCN-HSA-NEXT: v_mov_b32_e32 v20, s40
+; GCN-HSA-NEXT: v_mov_b32_e32 v21, s41
+; GCN-HSA-NEXT: v_mov_b32_e32 v22, s57
+; GCN-HSA-NEXT: v_mov_b32_e32 v23, s56
+; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[12:15]
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s63
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s62
+; GCN-HSA-NEXT: flat_store_dwordx4 v[30:31], v[16:19]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[32:33], v[20:23]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[4:7]
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5
-; GCN-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 32
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v23, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v22, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 48
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v25, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v24, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 16
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v27, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v26, s2
+; GCN-HSA-NEXT: s_addc_u32 s5, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s20
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s21
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xe0
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s8
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s9
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v21, s1
-; GCN-HSA-NEXT: v_mov_b32_e32 v20, s0
-; GCN-HSA-NEXT: s_waitcnt vmcnt(3)
-; GCN-HSA-NEXT: v_bfe_i32 v16, v13, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v13
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v13
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[16:19]
-; GCN-HSA-NEXT: v_mov_b32_e32 v27, s3
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v13, 16, v14
-; GCN-HSA-NEXT: v_mov_b32_e32 v26, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xf0
-; GCN-HSA-NEXT: v_bfe_i32 v18, v13, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v16, v14, 0, 16
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v18
-; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[16:19]
-; GCN-HSA-NEXT: v_mov_b32_e32 v23, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, v15
-; GCN-HSA-NEXT: v_mov_b32_e32 v22, s2
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xc0
-; GCN-HSA-NEXT: v_bfe_i32 v13, v13, 0, 16
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v16, 31, v15
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 16, v15
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v14, 31, v13
-; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[13:16]
-; GCN-HSA-NEXT: v_mov_b32_e32 v25, s3
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v14, 16, v12
-; GCN-HSA-NEXT: v_mov_b32_e32 v24, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xd0
-; GCN-HSA-NEXT: v_bfe_i32 v12, v12, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v14, v14, 0, 16
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v14
-; GCN-HSA-NEXT: v_mov_b32_e32 v17, s3
-; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[12:15]
-; GCN-HSA-NEXT: v_mov_b32_e32 v16, s2
-; GCN-HSA-NEXT: s_waitcnt vmcnt(6)
-; GCN-HSA-NEXT: v_bfe_i32 v12, v9, 0, 16
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v28, 16, v10
-; GCN-HSA-NEXT: v_mov_b32_e32 v29, v11
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v18, 16, v8
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v9
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v14, 16, v9
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[12:15]
-; GCN-HSA-NEXT: v_bfe_i32 v16, v29, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v12, v8, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v14, v18, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v8, v10, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v10, v28, 0, 16
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xa0
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v11
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v18, 16, v11
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT: s_waitcnt vmcnt(5)
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v20, 16, v2
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v10
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v14
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[8:11]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[22:23], v[16:19]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[12:15]
-; GCN-HSA-NEXT: v_bfe_i32 v8, v2, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v10, v20, 0, 16
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, s3
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v10
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xb0
-; GCN-HSA-NEXT: v_mov_b32_e32 v21, v3
-; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[8:11]
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v11, 31, v3
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v10, 16, v3
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s2
-; GCN-HSA-NEXT: v_bfe_i32 v8, v21, 0, 16
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s3
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x80
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v8
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s10
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s11
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s28
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s29
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v26, 16, v0
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v18, 16, v6
-; GCN-HSA-NEXT: v_lshrrev_b32_e32 v27, 16, v4
-; GCN-HSA-NEXT: v_bfe_i32 v12, v4, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v16, v6, 0, 16
-; GCN-HSA-NEXT: flat_store_dwordx4 v[2:3], v[8:11]
-; GCN-HSA-NEXT: v_bfe_i32 v6, v5, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v5
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v8, 16, v5
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT: v_bfe_i32 v24, v0, 0, 16
-; GCN-HSA-NEXT: v_bfe_i32 v26, v26, 0, 16
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x90
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v25, 31, v24
-; GCN-HSA-NEXT: v_bfe_i32 v14, v27, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v27, 31, v26
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 0xa0
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s6
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s7
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s16
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s17
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[24:27]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT: v_bfe_i32 v20, v1, 0, 16
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v23, 31, v1
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v22, 16, v1
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v21, 31, v20
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x80
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s30
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s31
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s34
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s35
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[20:23]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v19, v7
-; GCN-HSA-NEXT: v_bfe_i32 v18, v18, 0, 16
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x70
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT: v_bfe_i32 v0, v19, 0, 16
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v19, 31, v18
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s18
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s19
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s22
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s23
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[16:19]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v7
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v2, 16, v7
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GCN-HSA-NEXT: s_add_u32 s0, s0, 64
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s24
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s25
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s26
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s27
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 64
-; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v13, 31, v12
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v15, 31, v14
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT: s_add_u32 s0, s0, 0x50
-; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[12:15]
; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s0
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v7, 31, v6
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s1
-; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[6:9]
+; GCN-HSA-NEXT: v_mov_b32_e32 v34, s38
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1
+; GCN-HSA-NEXT: v_mov_b32_e32 v35, s39
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s36
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s37
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s61
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s60
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s12
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s13
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s14
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s15
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0
+; GCN-HSA-NEXT: flat_store_dwordx4 v[34:35], v[8:11]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_endpgm
;
; GCN-NOHSA-VI-LABEL: global_sextload_v32i16_to_v32i64:
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i8.ll b/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
index f879dc660203f..9631398b6de67 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
@@ -6270,43 +6270,44 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out,
; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4
; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5
; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s4, v1
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s5, v0
-; GCN-NOHSA-SI-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s6, s4, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s8, s5, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s5, 24
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s5, 8
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s4, 8
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[4:5], 0x80000
-; GCN-NOHSA-SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s15, s4, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s18, s4, 24
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[14:15], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s6, s5, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s8, s5, 8
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s10, s5
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s4, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s4, 24
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s16, s4, 8
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[4:5], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s17, s5, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s20, s5, 24
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[16:17], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s18
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s15
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s16
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s17
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s6
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s7
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s8
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s9
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s20
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s17
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s10
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s11
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s12
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s13
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s4
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s5
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s18
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s19
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s6
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s7
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s8
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s9
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(1)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s12
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s13
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s14
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s15
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s4
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s5
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0
; GCN-NOHSA-SI-NEXT: s_endpgm
;
; GCN-HSA-LABEL: global_sextload_v8i8_to_v8i64:
@@ -6322,53 +6323,55 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out,
; GCN-HSA-NEXT: v_mov_b32_e32 v17, s1
; GCN-HSA-NEXT: v_mov_b32_e32 v16, s0
; GCN-HSA-NEXT: s_waitcnt vmcnt(0)
-; GCN-HSA-NEXT: v_readfirstlane_b32 s2, v1
-; GCN-HSA-NEXT: v_readfirstlane_b32 s3, v0
-; GCN-HSA-NEXT: s_lshr_b32 s4, s2, 16
-; GCN-HSA-NEXT: s_lshr_b32 s6, s3, 16
-; GCN-HSA-NEXT: s_lshr_b32 s8, s3, 24
-; GCN-HSA-NEXT: s_lshr_b32 s10, s3, 8
-; GCN-HSA-NEXT: s_lshr_b32 s12, s2, 8
-; GCN-HSA-NEXT: s_ashr_i32 s13, s2, 31
-; GCN-HSA-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[2:3], 0x80000
-; GCN-HSA-NEXT: s_ashr_i32 s16, s2, 24
-; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[12:13], 0x80000
+; GCN-HSA-NEXT: v_readfirstlane_b32 s2, v0
+; GCN-HSA-NEXT: v_readfirstlane_b32 s3, v1
+; GCN-HSA-NEXT: s_lshr_b32 s4, s3, 16
+; GCN-HSA-NEXT: s_lshr_b32 s6, s3, 8
+; GCN-HSA-NEXT: s_mov_b32 s8, s3
+; GCN-HSA-NEXT: s_lshr_b32 s10, s2, 16
+; GCN-HSA-NEXT: s_lshr_b32 s12, s2, 24
+; GCN-HSA-NEXT: s_lshr_b32 s14, s2, 8
+; GCN-HSA-NEXT: s_ashr_i32 s15, s3, 31
+; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[2:3], 0x80000
+; GCN-HSA-NEXT: s_ashr_i32 s18, s3, 24
+; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[14:15], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GCN-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s4
-; GCN-HSA-NEXT: s_add_u32 s4, s0, 48
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s5
-; GCN-HSA-NEXT: s_addc_u32 s5, s1, 0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[0:3]
-; GCN-HSA-NEXT: v_mov_b32_e32 v6, s16
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4
-; GCN-HSA-NEXT: v_mov_b32_e32 v7, s13
-; GCN-HSA-NEXT: v_mov_b32_e32 v10, s2
+; GCN-HSA-NEXT: s_add_u32 s4, s0, 48
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 16
-; GCN-HSA-NEXT: v_mov_b32_e32 v11, s3
-; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; GCN-HSA-NEXT: s_addc_u32 s5, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v19, s5
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s18
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s15
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s2
+; GCN-HSA-NEXT: v_mov_b32_e32 v18, s4
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 32
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s3
+; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[0:3]
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT: v_mov_b32_e32 v12, s6
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, s7
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s8
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, s9
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s8
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s9
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s6
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s7
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT: s_add_u32 s0, s0, 32
-; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[12:15]
+; GCN-HSA-NEXT: s_add_u32 s0, s0, 16
+; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s0
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, s14
-; GCN-HSA-NEXT: v_mov_b32_e32 v9, s15
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s10
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s11
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s12
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s13
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s1
-; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s16
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s17
+; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[12:15]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[4:7]
; GCN-HSA-NEXT: s_endpgm
;
; GCN-NOHSA-VI-LABEL: global_sextload_v8i8_to_v8i64:
@@ -6382,44 +6385,46 @@ define amdgpu_kernel void @global_sextload_v8i8_to_v8i64(ptr addrspace(1) %out,
; GCN-NOHSA-VI-NEXT: s_mov_b32 s8, s6
; GCN-NOHSA-VI-NEXT: s_mov_b32 s9, s7
; GCN-NOHSA-VI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; GCN-NOHSA-VI-NEXT: s_mov_b32 s0, s4
; GCN-NOHSA-VI-NEXT: s_mov_b32 s1, s5
+; GCN-NOHSA-VI-NEXT: s_mov_b32 s0, s4
; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v1
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v0
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s6, s4, 16
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s8, s5, 16
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s10, s5, 24
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s12, s5, 8
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s14, s4, 8
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s15, s4, 31
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s18, s4, 24
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s6, s5, 16
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s8, s5, 8
+; GCN-NOHSA-VI-NEXT: s_mov_b32 s10, s5
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s12, s4, 16
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s14, s4, 24
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s16, s4, 8
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s17, s5, 31
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s20, s5, 24
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GCN-NOHSA-VI-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[16:17], s[4:5], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[4:5], s[14:15], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[18:19], s[4:5], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[4:5], s[16:17], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s18
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s15
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s6
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s7
-; GCN-NOHSA-VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s16
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s17
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v12, s8
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, s9
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v14, s10
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, s11
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s12
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s13
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s4
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s5
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:48
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s20
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s17
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s6
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s7
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s18
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s19
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s10
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s11
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s8
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s9
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v12, s12
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, s13
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v14, s14
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, s15
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s4
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s5
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
; GCN-NOHSA-VI-NEXT: s_endpgm
;
; EG-LABEL: global_sextload_v8i8_to_v8i64:
@@ -6934,85 +6939,84 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out
; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4
; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5
; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s4, v2
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s5, v3
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s6, v0
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s7, v1
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s4, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s4, 24
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s16, s4, 8
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s18, s6, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s6, 24
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s6, 8
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s5, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s5, 8
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s8, v2
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s9, v3
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s9, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s9, 8
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s14, s9
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s16, s8, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s18, s8, 24
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s8, 8
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s22, s5, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s5, 8
; GCN-NOHSA-SI-NEXT: s_mov_b32 s26, s5
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s7, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s7, 8
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s8, s7
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[4:5], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s29, s7, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s31, s7, 24
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s4, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s4, 24
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s34, s4, 8
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[4:5], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[8:9], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s5, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s38, s5, 24
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[30:31], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[28:29], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s31, s5, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s5, 24
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s35, s9, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s38, s9, 24
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[34:35], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[30:31], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s36
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s37
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s34
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s35
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s38
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s33
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s26
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s27
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s12
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s13
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s14
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s15
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s38
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s35
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s14
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s15
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s36
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s37
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s33
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s31
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s26
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s27
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s10
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s11
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s31
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s29
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s6
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s7
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s12
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s13
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s16
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s17
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s18
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s19
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s20
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s21
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s8
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s9
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s22
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s23
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s18
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s19
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s20
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s21
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s24
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s25
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s10
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s11
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:96
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s22
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s23
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s24
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s25
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s28
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s29
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s8
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s9
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:16
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s6
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s7
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s4
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s5
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GCN-NOHSA-SI-NEXT: s_endpgm
;
; GCN-HSA-LABEL: global_sextload_v16i8_to_v16i64:
@@ -7025,42 +7029,42 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
; GCN-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
-; GCN-HSA-NEXT: v_mov_b32_e32 v9, s1
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, s0
+; GCN-HSA-NEXT: v_mov_b32_e32 v17, s1
+; GCN-HSA-NEXT: v_mov_b32_e32 v16, s0
; GCN-HSA-NEXT: s_waitcnt vmcnt(0)
-; GCN-HSA-NEXT: v_readfirstlane_b32 s2, v2
-; GCN-HSA-NEXT: v_readfirstlane_b32 s3, v3
-; GCN-HSA-NEXT: v_readfirstlane_b32 s4, v0
-; GCN-HSA-NEXT: v_readfirstlane_b32 s5, v1
-; GCN-HSA-NEXT: s_lshr_b32 s6, s2, 16
-; GCN-HSA-NEXT: s_lshr_b32 s8, s2, 24
-; GCN-HSA-NEXT: s_lshr_b32 s10, s2, 8
+; GCN-HSA-NEXT: v_readfirstlane_b32 s2, v0
+; GCN-HSA-NEXT: v_readfirstlane_b32 s3, v1
+; GCN-HSA-NEXT: v_readfirstlane_b32 s4, v2
+; GCN-HSA-NEXT: v_readfirstlane_b32 s5, v3
+; GCN-HSA-NEXT: s_lshr_b32 s6, s5, 16
+; GCN-HSA-NEXT: s_lshr_b32 s8, s5, 8
+; GCN-HSA-NEXT: s_mov_b32 s10, s5
+; GCN-HSA-NEXT: s_lshr_b32 s12, s4, 16
+; GCN-HSA-NEXT: s_lshr_b32 s14, s4, 24
+; GCN-HSA-NEXT: s_lshr_b32 s16, s4, 8
+; GCN-HSA-NEXT: s_ashr_i32 s7, s5, 31
+; GCN-HSA-NEXT: s_ashr_i32 s9, s5, 24
+; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[2:3], 0x80000
; GCN-HSA-NEXT: s_lshr_b32 s18, s3, 16
; GCN-HSA-NEXT: s_lshr_b32 s20, s3, 8
; GCN-HSA-NEXT: s_mov_b32 s22, s3
-; GCN-HSA-NEXT: s_ashr_i32 s7, s3, 31
-; GCN-HSA-NEXT: s_ashr_i32 s9, s3, 24
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5
+; GCN-HSA-NEXT: s_lshr_b32 s4, s2, 16
+; GCN-HSA-NEXT: s_ashr_i32 s5, s3, 31
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7
+; GCN-HSA-NEXT: s_ashr_i32 s7, s3, 24
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s24
+; GCN-HSA-NEXT: s_lshr_b32 s24, s2, 24
+; GCN-HSA-NEXT: s_lshr_b32 s2, s2, 8
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s9
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s7
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s5
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s25
; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GCN-HSA-NEXT: s_lshr_b32 s12, s4, 16
-; GCN-HSA-NEXT: s_lshr_b32 s14, s4, 24
-; GCN-HSA-NEXT: s_lshr_b32 s16, s4, 8
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[4:5], 0x80000
-; GCN-HSA-NEXT: s_ashr_i32 s4, s5, 24
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s2
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s3
-; GCN-HSA-NEXT: s_lshr_b32 s2, s5, 16
-; GCN-HSA-NEXT: s_ashr_i32 s3, s5, 31
-; GCN-HSA-NEXT: v_mov_b32_e32 v10, s4
-; GCN-HSA-NEXT: s_lshr_b32 s4, s5, 8
-; GCN-HSA-NEXT: s_mov_b32 s24, s5
-; GCN-HSA-NEXT: v_mov_b32_e32 v6, s9
-; GCN-HSA-NEXT: v_mov_b32_e32 v7, s7
-; GCN-HSA-NEXT: v_mov_b32_e32 v11, s3
; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
@@ -7070,66 +7074,66 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out
; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s16
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s17
-; GCN-HSA-NEXT: v_mov_b32_e32 v12, s6
-; GCN-HSA-NEXT: s_add_u32 s6, s0, 0x50
-; GCN-HSA-NEXT: flat_store_dwordx4 v[8:9], v[2:5]
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, s7
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s6
+; GCN-HSA-NEXT: s_add_u32 s6, s0, 0x70
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s7
; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7
-; GCN-HSA-NEXT: s_add_u32 s6, s0, 64
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s7
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s6
+; GCN-HSA-NEXT: s_add_u32 s6, s0, 0x60
; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v9, s7
+; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[2:5]
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s7
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s6
+; GCN-HSA-NEXT: s_add_u32 s6, s0, 0x50
+; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s7
; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10
; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, s6
-; GCN-HSA-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 16
-; GCN-HSA-NEXT: v_mov_b32_e32 v9, s3
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v17, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s8
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, s9
-; GCN-HSA-NEXT: v_mov_b32_e32 v16, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x70
-; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[12:15]
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s6
+; GCN-HSA-NEXT: s_add_u32 s6, s0, 64
+; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[2:5]
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s12
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s13
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s14
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s15
+; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0
+; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[10:13]
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s16
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s7
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s17
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s6
+; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[0:3]
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s2
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 48
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s3
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v12, s12
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, s13
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s14
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, s15
-; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[12:15]
-; GCN-HSA-NEXT: v_mov_b32_e32 v17, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v16, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60
+; GCN-HSA-NEXT: v_mov_b32_e32 v19, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v18, s2
+; GCN-HSA-NEXT: s_add_u32 s2, s0, 32
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s18
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s19
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[4:7]
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s22
+; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[4:7]
+; GCN-HSA-NEXT: s_add_u32 s0, s0, 16
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s3
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s23
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s20
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s21
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s22
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s23
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s20
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s21
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s2
-; GCN-HSA-NEXT: s_add_u32 s2, s0, 48
-; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT: s_add_u32 s0, s0, 32
-; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s0
-; GCN-HSA-NEXT: v_mov_b32_e32 v12, s24
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, s25
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s4
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, s5
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s1
-; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[12:15]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[12:15]
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s24
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s25
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[8:11]
; GCN-HSA-NEXT: s_endpgm
;
; GCN-NOHSA-VI-LABEL: global_sextload_v16i8_to_v16i64:
@@ -7146,81 +7150,80 @@ define amdgpu_kernel void @global_sextload_v16i8_to_v16i64(ptr addrspace(1) %out
; GCN-NOHSA-VI-NEXT: s_mov_b32 s0, s4
; GCN-NOHSA-VI-NEXT: s_mov_b32 s1, s5
; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v2
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s6, v0
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s14, s4, 16
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s16, s4, 24
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v3
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s7, v1
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s18, s4, 8
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s20, s6, 16
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s22, s6, 24
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s24, s6, 8
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s9, v3
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s8, v2
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s16, s9, 16
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v0
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v1
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s18, s9, 8
+; GCN-NOHSA-VI-NEXT: s_mov_b32 s20, s9
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s22, s8, 16
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s24, s8, 24
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s26, s8, 8
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s35, s9, 31
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s38, s9, 24
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s26, s5, 16
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s10, s5, 8
-; GCN-NOHSA-VI-NEXT: s_mov_b32 s12, s5
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s8, s7, 16
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s28, s7, 8
-; GCN-NOHSA-VI-NEXT: s_mov_b32 s30, s7
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[36:37], s[4:5], 0x80000
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s29, s7, 31
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s31, s7, 24
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s28, s5, 16
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s12, s5, 8
+; GCN-NOHSA-VI-NEXT: s_mov_b32 s14, s5
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s10, s4, 16
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s30, s4, 24
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s34, s4, 8
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[36:37], s[8:9], 0x80000
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s31, s5, 31
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s14
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s15
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s16
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s17
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s33, s5, 31
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s38, s5, 24
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[6:7], s[30:31], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[4:5], s[28:29], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s38
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s35
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s16
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s17
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[6:7], s[4:5], 0x80000
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s33, s5, 24
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[4:5], s[34:35], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[8:9], s[30:31], 0x80000
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s36
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s37
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s34
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s35
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s18
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s19
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:80
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v16, s20
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v17, s21
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v18, s22
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v19, s23
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s24
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s25
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s38
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s33
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v14, s31
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, s29
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s26
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s27
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v12, s8
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s12
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s13
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s10
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s11
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, s9
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:16
-; GCN-NOHSA-VI-NEXT: s_nop 0
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v16, s6
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v17, s7
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v18, s4
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v19, s5
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s36
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s37
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v14, s20
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, s21
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v16, s18
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v17, s19
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s26
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s22
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s23
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s24
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s25
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s27
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s33
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s31
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v12, s6
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, s7
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s28
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s29
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:96
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v18, s12
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v16, s14
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v17, s15
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v19, s13
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v14, s4
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s10
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s11
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s8
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s9
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, s5
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:48
; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0
; GCN-NOHSA-VI-NEXT: s_endpgm
;
; EG-LABEL: global_sextload_v16i8_to_v16i64:
@@ -8176,166 +8179,166 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(ptr addrspace(1) %out
; GCN-NOHSA-SI-NEXT: s_mov_b32 s0, s4
; GCN-NOHSA-SI-NEXT: s_mov_b32 s1, s5
; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(1)
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s18, v2
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s19, v3
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s22, v0
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s23, v1
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s36, v0
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s37, v1
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s38, v2
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s39, v3
; GCN-NOHSA-SI-NEXT: s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s24, v6
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s25, v7
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s12, v4
-; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s13, v5
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s38, s18, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s36, s18, 24
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s34, s18, 8
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s22, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s22, 24
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s22, 8
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s4, s24, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s6, s24, 24
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s8, s24, 8
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s12, 16
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s12, 24
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s16, s12, 8
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s19, 16
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[40:41], s[12:13], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[42:43], s[18:19], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[44:45], s[22:23], 0x80000
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s6, v4
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s7, v5
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s8, v6
+; GCN-NOHSA-SI-NEXT: v_readfirstlane_b32 s9, v7
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s30, s39, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s34, s39, 8
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s42, s39
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s38, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s38, 24
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s38, 8
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s4, s37, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s10, s37, 8
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s40, s37
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s12, s36, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s14, s36, 24
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s16, s36, 8
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s18, s9, 16
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s20, s9, 8
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s22, s9
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s5, s39, 31
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s11, s39, 24
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[44:45], s[38:39], 0x80000
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s42
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s43
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[42:43], s[24:25], 0x80000
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s38, s8, 16
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s44
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s45
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s18, s19, 8
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s42
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s43
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s12, s19
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s40
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s41
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s40, s23, 16
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s42, s8, 24
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s11
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s11, s37, 31
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s5
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s5, s37, 24
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s38
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s39
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s38, s23, 8
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s36
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s37
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s22, s23
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[12:13], 0x80000
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s40
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s41
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s40, s8, 8
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s36
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s37
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s36, s7, 16
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:208
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s36
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s37
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s36, s25, 16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s30
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s31
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s44, s7, 8
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s34
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s35
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s25, 8
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s30
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s25, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s39, s23, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s41, s23, 24
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s5, s19, 31
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s7, s19, 24
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s44, s25, 24
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s12, s25
+; GCN-NOHSA-SI-NEXT: s_mov_b32 s34, s7
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:240
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s28
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s29
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s6, 16
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s28
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s29
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s28, s13, 16
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s26
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s27
-; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s13, 8
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s26
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s27
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s26, s6, 24
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s7
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s45, s13, 31
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s5
-; GCN-NOHSA-SI-NEXT: s_ashr_i32 s46, s13, 24
-; GCN-NOHSA-SI-NEXT: s_mov_b32 s30, s13
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[12:13], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[42:43], s[22:23], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[26:27], 0x80000
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s24
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s25
+; GCN-NOHSA-SI-NEXT: s_lshr_b32 s24, s6, 8
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s5
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s5, s9, 31
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s11
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s11, s9, 24
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:208
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s11
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s33, s7, 31
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s5
+; GCN-NOHSA-SI-NEXT: s_ashr_i32 s41, s7, 24
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[30:31], s[6:7], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[46:47], s[8:9], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[48:49], s[22:23], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[24:25], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[26:27], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[22:23], s[28:29], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[24:25], s[44:45], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[26:27], s[36:37], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[38:39], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[40:41], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[28:29], s[40:41], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[36:37], s[42:43], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
; GCN-NOHSA-SI-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:128
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:192
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s4
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s5
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s6
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s7
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:80
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s48
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s49
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s41
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s39
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s8
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s9
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s10
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s11
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s46
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s47
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s10
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s11
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:160
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s14
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s15
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s12
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s13
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s14
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s15
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:144
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s42
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s43
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s16
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s17
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s44
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s41
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s33
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s20
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s21
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s17
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:128
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s34
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s35
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s18
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s19
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:112
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(1)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v16, s30
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v17, s31
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s34
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s35
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s18
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s19
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:224
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v12, s46
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v13, s45
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s36
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s37
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:176
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s20
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s21
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s38
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s39
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s30
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s31
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s28
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s29
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[6:9], off, s[0:3], 0 offset:160
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v4, s36
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v5, s37
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[2:5], off, s[0:3], 0 offset:80
; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(0)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s28
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s29
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v8, s26
; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v9, s27
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:112
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s24
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s25
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
-; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(1)
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v10, s22
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v11, s23
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v6, s12
-; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v7, s13
-; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v14, s24
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v15, s25
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT: s_waitcnt expcnt(2)
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v0, s22
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v1, s23
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v2, s8
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v3, s9
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v18, s6
+; GCN-NOHSA-SI-NEXT: v_mov_b32_e32 v19, s7
+; GCN-NOHSA-SI-NEXT: buffer_store_dwordx4 v[16:19], off, s[0:3], 0
; GCN-NOHSA-SI-NEXT: s_endpgm
;
; GCN-HSA-LABEL: global_sextload_v32i8_to_v32i64:
@@ -8348,223 +8351,225 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(ptr addrspace(1) %out
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
; GCN-HSA-NEXT: s_add_u32 s2, s2, 16
-; GCN-HSA-NEXT: flat_load_dwordx4 v[4:7], v[0:1]
+; GCN-HSA-NEXT: flat_load_dwordx4 v[6:9], v[0:1]
; GCN-HSA-NEXT: s_addc_u32 s3, s3, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
-; GCN-HSA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; GCN-HSA-NEXT: flat_load_dwordx4 v[2:5], v[0:1]
; GCN-HSA-NEXT: s_waitcnt vmcnt(1)
+; GCN-HSA-NEXT: v_readfirstlane_b32 s10, v8
+; GCN-HSA-NEXT: v_readfirstlane_b32 s11, v9
; GCN-HSA-NEXT: v_readfirstlane_b32 s6, v6
-; GCN-HSA-NEXT: v_readfirstlane_b32 s8, v4
-; GCN-HSA-NEXT: v_readfirstlane_b32 s9, v5
; GCN-HSA-NEXT: v_readfirstlane_b32 s7, v7
-; GCN-HSA-NEXT: s_lshr_b32 s20, s6, 16
-; GCN-HSA-NEXT: s_lshr_b32 s18, s6, 24
-; GCN-HSA-NEXT: s_lshr_b32 s10, s8, 16
-; GCN-HSA-NEXT: s_lshr_b32 s2, s8, 24
-; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[8:9], 0x80000
-; GCN-HSA-NEXT: s_lshr_b32 s16, s6, 8
-; GCN-HSA-NEXT: s_lshr_b32 s4, s8, 8
-; GCN-HSA-NEXT: s_lshr_b32 s12, s7, 16
-; GCN-HSA-NEXT: s_lshr_b32 s14, s7, 8
-; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[6:7], 0x80000
-; GCN-HSA-NEXT: s_lshr_b32 s6, s9, 16
-; GCN-HSA-NEXT: s_mov_b32 s28, s9
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s26
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s27
-; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[18:19], 0x80000
+; GCN-HSA-NEXT: s_lshr_b32 s16, s11, 16
+; GCN-HSA-NEXT: s_lshr_b32 s18, s11, 8
+; GCN-HSA-NEXT: s_mov_b32 s24, s11
+; GCN-HSA-NEXT: s_lshr_b32 s22, s10, 16
+; GCN-HSA-NEXT: s_lshr_b32 s14, s10, 24
+; GCN-HSA-NEXT: s_lshr_b32 s2, s10, 8
+; GCN-HSA-NEXT: s_lshr_b32 s4, s7, 16
+; GCN-HSA-NEXT: s_lshr_b32 s8, s7, 8
+; GCN-HSA-NEXT: s_mov_b32 s12, s7
+; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[10:11], 0x80000
+; GCN-HSA-NEXT: s_lshr_b32 s10, s6, 16
+; GCN-HSA-NEXT: s_lshr_b32 s28, s6, 24
+; GCN-HSA-NEXT: s_lshr_b32 s30, s6, 8
+; GCN-HSA-NEXT: s_ashr_i32 s43, s7, 31
+; GCN-HSA-NEXT: s_ashr_i32 s52, s7, 24
+; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[6:7], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[16:17], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[24:25], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
; GCN-HSA-NEXT: s_waitcnt vmcnt(0)
-; GCN-HSA-NEXT: v_readfirstlane_b32 s40, v2
-; GCN-HSA-NEXT: v_readfirstlane_b32 s41, v3
-; GCN-HSA-NEXT: s_bfe_i64 s[42:43], s[10:11], 0x80000
-; GCN-HSA-NEXT: v_readfirstlane_b32 s44, v0
-; GCN-HSA-NEXT: v_readfirstlane_b32 s45, v1
+; GCN-HSA-NEXT: v_readfirstlane_b32 s24, v2
+; GCN-HSA-NEXT: v_readfirstlane_b32 s25, v3
+; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GCN-HSA-NEXT: v_readfirstlane_b32 s36, v4
+; GCN-HSA-NEXT: v_readfirstlane_b32 s37, v5
; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000
-; GCN-HSA-NEXT: s_mov_b32 s22, s7
-; GCN-HSA-NEXT: s_lshr_b32 s8, s9, 8
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, s24
-; GCN-HSA-NEXT: v_mov_b32_e32 v9, s25
-; GCN-HSA-NEXT: s_bfe_i64 s[30:31], s[16:17], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[28:29], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[6:7], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[14:15], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[12:13], 0x80000
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s26
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s27
+; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[30:31], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[30:31], s[28:29], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[34:35], s[10:11], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[44:45], s[12:13], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[40:41], s[8:9], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[46:47], s[4:5], 0x80000
-; GCN-HSA-NEXT: v_mov_b32_e32 v12, s20
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s26
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, s27
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s42
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s6
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s18
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s22
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s14
; GCN-HSA-NEXT: v_mov_b32_e32 v2, s2
-; GCN-HSA-NEXT: s_lshr_b32 s42, s44, 16
-; GCN-HSA-NEXT: s_lshr_b32 s48, s44, 24
-; GCN-HSA-NEXT: s_lshr_b32 s28, s44, 8
-; GCN-HSA-NEXT: s_lshr_b32 s6, s45, 16
-; GCN-HSA-NEXT: s_lshr_b32 s2, s45, 8
-; GCN-HSA-NEXT: s_mov_b32 s4, s45
-; GCN-HSA-NEXT: s_bfe_i64 s[26:27], s[44:45], 0x80000
-; GCN-HSA-NEXT: s_lshr_b32 s44, s40, 16
-; GCN-HSA-NEXT: s_lshr_b32 s50, s40, 24
-; GCN-HSA-NEXT: s_lshr_b32 s52, s40, 8
-; GCN-HSA-NEXT: s_lshr_b32 s20, s41, 16
-; GCN-HSA-NEXT: s_lshr_b32 s12, s41, 8
-; GCN-HSA-NEXT: s_mov_b32 s14, s41
-; GCN-HSA-NEXT: s_ashr_i32 s33, s9, 31
-; GCN-HSA-NEXT: s_ashr_i32 s37, s7, 31
-; GCN-HSA-NEXT: s_ashr_i32 s38, s7, 24
-; GCN-HSA-NEXT: s_ashr_i32 s34, s9, 24
-; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, s21
-; GCN-HSA-NEXT: v_mov_b32_e32 v10, s30
-; GCN-HSA-NEXT: v_mov_b32_e32 v11, s31
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s43
; GCN-HSA-NEXT: v_mov_b32_e32 v3, s3
-; GCN-HSA-NEXT: s_ashr_i32 s30, s45, 31
-; GCN-HSA-NEXT: s_ashr_i32 s31, s45, 24
-; GCN-HSA-NEXT: s_ashr_i32 s35, s41, 31
-; GCN-HSA-NEXT: s_ashr_i32 s36, s41, 24
-; GCN-HSA-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000
+; GCN-HSA-NEXT: s_lshr_b32 s14, s25, 16
+; GCN-HSA-NEXT: s_lshr_b32 s10, s25, 8
+; GCN-HSA-NEXT: s_mov_b32 s12, s25
+; GCN-HSA-NEXT: s_lshr_b32 s8, s24, 16
+; GCN-HSA-NEXT: s_lshr_b32 s6, s24, 24
+; GCN-HSA-NEXT: s_lshr_b32 s4, s24, 8
+; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[24:25], 0x80000
+; GCN-HSA-NEXT: s_lshr_b32 s38, s37, 16
+; GCN-HSA-NEXT: s_lshr_b32 s28, s37, 8
+; GCN-HSA-NEXT: s_mov_b32 s48, s37
+; GCN-HSA-NEXT: s_lshr_b32 s24, s36, 16
+; GCN-HSA-NEXT: s_lshr_b32 s22, s36, 24
+; GCN-HSA-NEXT: s_lshr_b32 s18, s36, 8
+; GCN-HSA-NEXT: s_ashr_i32 s50, s11, 31
+; GCN-HSA-NEXT: s_ashr_i32 s51, s11, 24
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s7
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s16
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s17
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s19
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s23
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s15
+; GCN-HSA-NEXT: s_ashr_i32 s33, s25, 31
+; GCN-HSA-NEXT: s_ashr_i32 s42, s25, 24
+; GCN-HSA-NEXT: s_ashr_i32 s53, s37, 31
+; GCN-HSA-NEXT: s_ashr_i32 s54, s37, 24
+; GCN-HSA-NEXT: s_bfe_i64 s[16:17], s[36:37], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GCN-HSA-NEXT: s_bfe_i64 s[36:37], s[48:49], 0x80000
; GCN-HSA-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000
-; GCN-HSA-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000
-; GCN-HSA-NEXT: s_add_u32 s54, s0, 0x50
-; GCN-HSA-NEXT: s_addc_u32 s55, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v24, s54
-; GCN-HSA-NEXT: v_mov_b32_e32 v25, s55
-; GCN-HSA-NEXT: s_add_u32 s54, s0, 64
-; GCN-HSA-NEXT: s_addc_u32 s55, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v26, s54
-; GCN-HSA-NEXT: v_mov_b32_e32 v27, s55
-; GCN-HSA-NEXT: s_add_u32 s54, s0, 16
-; GCN-HSA-NEXT: s_addc_u32 s55, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v20, s40
-; GCN-HSA-NEXT: s_add_u32 s40, s0, 0xd0
-; GCN-HSA-NEXT: v_mov_b32_e32 v21, s41
-; GCN-HSA-NEXT: s_addc_u32 s41, s1, 0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[8:11]
-; GCN-HSA-NEXT: v_mov_b32_e32 v28, s54
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, s40
-; GCN-HSA-NEXT: v_mov_b32_e32 v9, s41
-; GCN-HSA-NEXT: s_add_u32 s40, s0, 0xc0
-; GCN-HSA-NEXT: v_mov_b32_e32 v29, s55
-; GCN-HSA-NEXT: s_addc_u32 s41, s1, 0
+; GCN-HSA-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000
+; GCN-HSA-NEXT: s_add_u32 s48, s0, 0x70
+; GCN-HSA-NEXT: s_addc_u32 s49, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v18, s48
+; GCN-HSA-NEXT: v_mov_b32_e32 v19, s49
+; GCN-HSA-NEXT: s_add_u32 s48, s0, 0x60
+; GCN-HSA-NEXT: s_addc_u32 s49, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v24, s48
+; GCN-HSA-NEXT: v_mov_b32_e32 v25, s49
+; GCN-HSA-NEXT: s_add_u32 s48, s0, 0x50
+; GCN-HSA-NEXT: s_addc_u32 s49, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v26, s48
+; GCN-HSA-NEXT: v_mov_b32_e32 v27, s49
+; GCN-HSA-NEXT: s_add_u32 s48, s0, 64
+; GCN-HSA-NEXT: s_addc_u32 s49, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v20, s44
+; GCN-HSA-NEXT: s_add_u32 s44, s0, 48
+; GCN-HSA-NEXT: v_mov_b32_e32 v21, s45
+; GCN-HSA-NEXT: s_addc_u32 s45, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v28, s48
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s51
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s50
+; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[4:7]
+; GCN-HSA-NEXT: v_mov_b32_e32 v29, s49
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s34
+; GCN-HSA-NEXT: s_add_u32 s34, s0, 32
+; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11]
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s35
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s44
+; GCN-HSA-NEXT: s_addc_u32 s35, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v16, s46
+; GCN-HSA-NEXT: v_mov_b32_e32 v17, s47
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s45
+; GCN-HSA-NEXT: v_mov_b32_e32 v18, s52
+; GCN-HSA-NEXT: v_mov_b32_e32 v19, s43
; GCN-HSA-NEXT: flat_store_dwordx4 v[28:29], v[0:3]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[16:19]
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s20
+; GCN-HSA-NEXT: s_add_u32 s20, s0, 16
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s21
+; GCN-HSA-NEXT: s_addc_u32 s21, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v16, s20
+; GCN-HSA-NEXT: v_mov_b32_e32 v17, s21
+; GCN-HSA-NEXT: s_add_u32 s20, s0, 0xf0
; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[12:15]
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s26
-; GCN-HSA-NEXT: s_add_u32 s26, s0, 0x90
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s27
-; GCN-HSA-NEXT: s_addc_u32 s27, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v24, s26
-; GCN-HSA-NEXT: v_mov_b32_e32 v25, s27
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s1
-; GCN-HSA-NEXT: s_add_u32 s26, s0, 0x80
-; GCN-HSA-NEXT: v_mov_b32_e32 v6, s46
-; GCN-HSA-NEXT: v_mov_b32_e32 v7, s47
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s0
-; GCN-HSA-NEXT: s_addc_u32 s27, s1, 0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[2:3], v[4:7]
-; GCN-HSA-NEXT: v_mov_b32_e32 v16, s44
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s24
-; GCN-HSA-NEXT: s_add_u32 s24, s0, 0x70
-; GCN-HSA-NEXT: v_mov_b32_e32 v17, s45
-; GCN-HSA-NEXT: v_mov_b32_e32 v18, s50
-; GCN-HSA-NEXT: v_mov_b32_e32 v19, s51
-; GCN-HSA-NEXT: v_mov_b32_e32 v10, s40
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s25
-; GCN-HSA-NEXT: s_addc_u32 s25, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v22, s52
-; GCN-HSA-NEXT: v_mov_b32_e32 v23, s53
-; GCN-HSA-NEXT: v_mov_b32_e32 v12, s42
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, s43
-; GCN-HSA-NEXT: v_mov_b32_e32 v11, s41
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s48
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, s49
-; GCN-HSA-NEXT: v_mov_b32_e32 v26, s26
-; GCN-HSA-NEXT: flat_store_dwordx4 v[8:9], v[16:19]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[10:11], v[20:23]
-; GCN-HSA-NEXT: v_mov_b32_e32 v10, s18
-; GCN-HSA-NEXT: s_add_u32 s18, s0, 0x60
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s28
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s29
-; GCN-HSA-NEXT: v_mov_b32_e32 v27, s27
-; GCN-HSA-NEXT: flat_store_dwordx4 v[24:25], v[12:15]
-; GCN-HSA-NEXT: v_mov_b32_e32 v11, s19
-; GCN-HSA-NEXT: v_mov_b32_e32 v12, s24
-; GCN-HSA-NEXT: s_addc_u32 s19, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v14, s18
-; GCN-HSA-NEXT: v_mov_b32_e32 v8, s22
-; GCN-HSA-NEXT: v_mov_b32_e32 v6, s38
-; GCN-HSA-NEXT: v_mov_b32_e32 v7, s37
-; GCN-HSA-NEXT: v_mov_b32_e32 v9, s23
-; GCN-HSA-NEXT: v_mov_b32_e32 v13, s25
-; GCN-HSA-NEXT: v_mov_b32_e32 v15, s19
-; GCN-HSA-NEXT: flat_store_dwordx4 v[26:27], v[0:3]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[12:13], v[4:7]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[8:11]
+; GCN-HSA-NEXT: s_addc_u32 s21, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s34
+; GCN-HSA-NEXT: v_mov_b32_e32 v18, s20
+; GCN-HSA-NEXT: v_mov_b32_e32 v22, s40
+; GCN-HSA-NEXT: v_mov_b32_e32 v23, s41
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s35
+; GCN-HSA-NEXT: v_mov_b32_e32 v6, s30
+; GCN-HSA-NEXT: v_mov_b32_e32 v7, s31
+; GCN-HSA-NEXT: v_mov_b32_e32 v19, s21
+; GCN-HSA-NEXT: s_add_u32 s20, s0, 0xe0
+; GCN-HSA-NEXT: flat_store_dwordx4 v[14:15], v[20:23]
+; GCN-HSA-NEXT: s_addc_u32 s21, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v20, s20
+; GCN-HSA-NEXT: flat_store_dwordx4 v[16:17], v[4:7]
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s26
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s27
+; GCN-HSA-NEXT: v_mov_b32_e32 v21, s21
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0
+; GCN-HSA-NEXT: s_add_u32 s20, s0, 0xd0
+; GCN-HSA-NEXT: v_mov_b32_e32 v8, s38
+; GCN-HSA-NEXT: v_mov_b32_e32 v9, s39
+; GCN-HSA-NEXT: v_mov_b32_e32 v10, s54
+; GCN-HSA-NEXT: v_mov_b32_e32 v11, s53
+; GCN-HSA-NEXT: v_mov_b32_e32 v12, s36
+; GCN-HSA-NEXT: v_mov_b32_e32 v13, s37
+; GCN-HSA-NEXT: v_mov_b32_e32 v14, s28
+; GCN-HSA-NEXT: v_mov_b32_e32 v15, s29
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[18:19], v[8:11]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[20:21], v[12:15]
+; GCN-HSA-NEXT: s_addc_u32 s21, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s20
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s24
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s25
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s22
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s23
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s21
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT: s_nop 0
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s16
-; GCN-HSA-NEXT: s_add_u32 s16, s0, 48
+; GCN-HSA-NEXT: s_add_u32 s16, s0, 0xc0
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s17
; GCN-HSA-NEXT: s_addc_u32 s17, s1, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s16
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s34
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s33
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s18
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s19
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s17
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_nop 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s8
-; GCN-HSA-NEXT: s_add_u32 s8, s0, 32
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s9
-; GCN-HSA-NEXT: s_addc_u32 s9, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s10
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s11
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9
-; GCN-HSA-NEXT: s_add_u32 s8, s0, 0xf0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT: s_addc_u32 s9, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s20
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s21
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s36
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s35
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9
-; GCN-HSA-NEXT: s_add_u32 s8, s0, 0xe0
-; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
-; GCN-HSA-NEXT: s_addc_u32 s9, s1, 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v4, s8
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s14
+; GCN-HSA-NEXT: s_add_u32 s14, s0, 0xb0
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s15
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s12
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s13
-; GCN-HSA-NEXT: v_mov_b32_e32 v5, s9
+; GCN-HSA-NEXT: s_addc_u32 s15, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s14
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s42
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s33
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s15
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_nop 0
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s6
-; GCN-HSA-NEXT: s_add_u32 s6, s0, 0xb0
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s7
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s10
+; GCN-HSA-NEXT: s_add_u32 s10, s0, 0xa0
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s11
+; GCN-HSA-NEXT: s_addc_u32 s11, s1, 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v4, s10
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s12
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s13
+; GCN-HSA-NEXT: v_mov_b32_e32 v5, s11
+; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-HSA-NEXT: s_nop 0
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s6
+; GCN-HSA-NEXT: s_add_u32 s6, s0, 0x90
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s7
; GCN-HSA-NEXT: s_addc_u32 s7, s1, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s6
-; GCN-HSA-NEXT: s_add_u32 s0, s0, 0xa0
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s31
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s30
+; GCN-HSA-NEXT: s_add_u32 s0, s0, 0x80
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s8
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s9
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s7
; GCN-HSA-NEXT: s_addc_u32 s1, s1, 0
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: v_mov_b32_e32 v5, s1
-; GCN-HSA-NEXT: v_mov_b32_e32 v0, s4
-; GCN-HSA-NEXT: v_mov_b32_e32 v1, s5
-; GCN-HSA-NEXT: v_mov_b32_e32 v2, s2
-; GCN-HSA-NEXT: v_mov_b32_e32 v3, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
+; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
+; GCN-HSA-NEXT: v_mov_b32_e32 v2, s4
+; GCN-HSA-NEXT: v_mov_b32_e32 v3, s5
; GCN-HSA-NEXT: v_mov_b32_e32 v4, s0
; GCN-HSA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-HSA-NEXT: s_endpgm
@@ -8584,155 +8589,155 @@ define amdgpu_kernel void @global_sextload_v32i8_to_v32i64(ptr addrspace(1) %out
; GCN-NOHSA-VI-NEXT: s_mov_b32 s0, s4
; GCN-NOHSA-VI-NEXT: s_mov_b32 s1, s5
; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(1)
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s4, v2
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s9, v3
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s8, v2
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s30, s9, 16
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s11, v1
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s38, s9, 8
+; GCN-NOHSA-VI-NEXT: s_mov_b32 s40, s9
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s46, s8, 8
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s71, s9, 31
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s72, s9, 24
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s10, v0
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s50, s11, 16
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[28:29], s[8:9], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s72
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s71
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s30
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s31
; GCN-NOHSA-VI-NEXT: s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s9, v7
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s11, v5
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s5, v3
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s6, v0
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s10, v4
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s38, s4, 16
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s40, s4, 24
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s42, s4, 8
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s60, s9, 8
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s39, s11, 24
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s7, v1
-; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s8, v6
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s48, s6, 8
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s12, v4
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s13, v5
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s14, v6
+; GCN-NOHSA-VI-NEXT: v_readfirstlane_b32 s15, v7
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s52, s11, 8
+; GCN-NOHSA-VI-NEXT: s_mov_b32 s54, s11
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s69, s11, 31
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s70, s11, 24
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s40
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s41
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s38
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s39
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:224
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s28
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s29
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s46
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s47
; GCN-NOHSA-VI-NEXT: s_lshr_b32 s56, s10, 16
; GCN-NOHSA-VI-NEXT: s_lshr_b32 s58, s10, 24
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s36, s10, 8
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s62, s11, 16
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s64, s11, 8
-; GCN-NOHSA-VI-NEXT: s_mov_b32 s66, s11
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[20:21], s[10:11], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[68:69], s[4:5], 0x80000
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s33, s11, 31
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[10:11], s[60:61], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[60:61], s[38:39], 0x80000
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s50, s8, 16
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s52, s8, 24
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[34:35], s[6:7], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s68
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s69
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v4, s60
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v5, s61
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v6, s40
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v7, s41
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s42
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s43
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s54, s8, 8
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:208
; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:192
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[28:29], s[8:9], 0x80000
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s34
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s35
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s48
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s49
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:128
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[58:59], s[58:59], 0x80000
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s60, s10, 8
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s50
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s51
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s70
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s69
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[58:59], s[58:59], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x80000
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s48, s15, 16
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s54
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s55
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s52
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s53
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s30, s5, 16
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s28
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s29
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s54
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s55
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s24, s5, 8
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[18:19], s[10:11], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[60:61], s[60:61], 0x80000
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:160
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s34, s15, 8
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s56
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s57
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s58
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s59
-; GCN-NOHSA-VI-NEXT: s_mov_b32 s26, s5
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s71, s5, 31
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s72, s5, 24
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s22, s7, 16
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s20
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s21
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s36
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s37
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GCN-NOHSA-VI-NEXT: s_mov_b32 s36, s15
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s65, s15, 31
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s67, s15, 24
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:144
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s26, s14, 16
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s18
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s19
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s60
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s61
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s24, s14, 24
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:128
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s22, s14, 8
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s48
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s49
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s67
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s65
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s16, s7, 8
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s30
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s31
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s72
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s71
-; GCN-NOHSA-VI-NEXT: s_mov_b32 s18, s7
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s67, s7, 31
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s70, s7, 24
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s20, s13, 16
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s36
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s37
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s34
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s35
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[6:7], s[14:15], 0x80000
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:240
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s14, s9, 16
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s62, s13, 8
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s26
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s27
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s24
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s25
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x80000
+; GCN-NOHSA-VI-NEXT: s_mov_b32 s16, s13
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s33, s13, 31
+; GCN-NOHSA-VI-NEXT: s_ashr_i32 s63, s13, 24
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s64, s12, 16
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s6
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s7
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s22
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s23
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s66, s12, 24
; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:224
-; GCN-NOHSA-VI-NEXT: s_mov_b32 s12, s9
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s22
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s23
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s70
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s67
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s63, s9, 31
-; GCN-NOHSA-VI-NEXT: s_ashr_i32 s65, s9, 24
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:176
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x80000
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s18
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s19
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s16
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s17
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:160
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s44, s6, 16
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s14
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s15
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s65
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s63
-; GCN-NOHSA-VI-NEXT: s_lshr_b32 s46, s6, 24
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[8:9], s[62:63], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[6:7], s[66:67], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[14:15], s[62:63], 0x80000
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:64
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s42, s8, 16
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s20
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s21
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s63
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s33
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s44, s8, 24
+; GCN-NOHSA-VI-NEXT: s_lshr_b32 s68, s12, 8
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[4:5], s[12:13], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[10:11], s[66:67], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[12:13], s[64:65], 0x80000
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[8:9], s[68:69], 0x80000
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s16
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s17
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s14
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s15
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000
+; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s42
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s12
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s13
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s10
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s11
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[4:5], s[64:65], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000
-; GCN-NOHSA-VI-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:96
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v8, s44
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s8
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s9
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s39
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s33
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s45
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s46
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s47
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:144
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s6
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s7
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s4
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s5
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v9, s43
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, s44
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, s45
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:208
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v2, s8
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, s9
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GCN-NOHSA-VI-NEXT: s_endpgm
;
; EG-LABEL: global_sextload_v32i8_to_v32i64:
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
index bd191a37582c0..21519deed4a91 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
@@ -6155,11 +6155,11 @@ define amdgpu_kernel void @local_sextload_v8i16_to_v8i64(ptr addrspace(3) %out,
; SI-NEXT: v_bfe_i32 v8, v1, 0, 16
; SI-NEXT: v_bfe_i32 v2, v2, 0, 16
; SI-NEXT: v_bfe_i32 v10, v9, 0, 16
-; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; SI-NEXT: v_bfe_i32 v12, v12, 0, 16
+; SI-NEXT: v_bfe_i32 v14, v11, 0, 16
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8
; SI-NEXT: v_ashrrev_i32_e32 v3, 31, v2
-; SI-NEXT: v_bfe_i32 v14, v11, 0, 16
; SI-NEXT: v_ashrrev_i32_e32 v11, 31, v10
; SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12
; SI-NEXT: v_ashrrev_i32_e32 v15, 31, v14
@@ -6821,10 +6821,10 @@ define amdgpu_kernel void @local_sextload_v16i16_to_v16i64(ptr addrspace(3) %out
; SI-NEXT: v_mov_b32_e32 v18, s0
; SI-NEXT: s_waitcnt lgkmcnt(1)
; SI-NEXT: v_mov_b32_e32 v12, v3
-; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v2
-; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v14, v7
+; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v2
+; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v0
; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v6
; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v4
; SI-NEXT: v_ashrrev_i32_e32 v9, 31, v5
@@ -6847,24 +6847,24 @@ define amdgpu_kernel void @local_sextload_v16i16_to_v16i64(ptr addrspace(3) %out
; SI-NEXT: v_bfe_i32 v1, v4, 0, 16
; SI-NEXT: v_bfe_i32 v3, v5, 0, 16
; SI-NEXT: v_bfe_i32 v5, v6, 0, 16
-; SI-NEXT: v_bfe_i32 v10, v0, 0, 16
-; SI-NEXT: v_bfe_i32 v7, v2, 0, 16
+; SI-NEXT: v_bfe_i32 v7, v0, 0, 16
+; SI-NEXT: v_bfe_i32 v10, v2, 0, 16
; SI-NEXT: v_bfe_i32 v12, v19, 0, 16
-; SI-NEXT: v_ashrrev_i32_e32 v2, 31, v1
-; SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; SI-NEXT: v_bfe_i32 v14, v17, 0, 16
-; SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; SI-NEXT: v_bfe_i32 v16, v16, 0, 16
-; SI-NEXT: v_ashrrev_i32_e32 v11, 31, v10
+; SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; SI-NEXT: ds_write2_b64 v18, v[3:4], v[8:9] offset0:2 offset1:3
; SI-NEXT: v_bfe_i32 v3, v15, 0, 16
+; SI-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; SI-NEXT: v_ashrrev_i32_e32 v8, 31, v7
+; SI-NEXT: v_ashrrev_i32_e32 v11, 31, v10
; SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12
; SI-NEXT: v_ashrrev_i32_e32 v15, 31, v14
; SI-NEXT: v_ashrrev_i32_e32 v17, 31, v16
; SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3
-; SI-NEXT: ds_write2_b64 v18, v[7:8], v[3:4] offset0:12 offset1:13
-; SI-NEXT: ds_write2_b64 v18, v[10:11], v[16:17] offset0:8 offset1:9
+; SI-NEXT: ds_write2_b64 v18, v[10:11], v[3:4] offset0:12 offset1:13
+; SI-NEXT: ds_write2_b64 v18, v[7:8], v[16:17] offset0:8 offset1:9
; SI-NEXT: ds_write2_b64 v18, v[5:6], v[14:15] offset0:4 offset1:5
; SI-NEXT: ds_write2_b64 v18, v[1:2], v[12:13] offset1:1
; SI-NEXT: s_endpgm
@@ -8116,16 +8116,16 @@ define amdgpu_kernel void @local_sextload_v32i16_to_v32i64(ptr addrspace(3) %out
; SI-NEXT: v_ashrrev_i32_e32 v13, 31, v12
; SI-NEXT: ds_write2_b64 v7, v[10:11], v[12:13] offset0:4 offset1:5
; SI-NEXT: v_bfe_i32 v11, v6, 0, 16
-; SI-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; SI-NEXT: v_bfe_i32 v13, v4, 0, 16
-; SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; SI-NEXT: v_bfe_i32 v15, v15, 0, 16
-; SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; SI-NEXT: v_bfe_i32 v16, v14, 0, 16
; SI-NEXT: v_ashrrev_i32_e32 v10, 31, v9
; SI-NEXT: v_ashrrev_i32_e32 v17, 31, v16
; SI-NEXT: ds_write2_b64 v7, v[9:10], v[16:17] offset1:1
; SI-NEXT: v_bfe_i32 v17, v18, 0, 16
+; SI-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; SI-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; SI-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; SI-NEXT: v_ashrrev_i32_e32 v9, 31, v8
; SI-NEXT: v_ashrrev_i32_e32 v12, 31, v11
; SI-NEXT: v_ashrrev_i32_e32 v14, 31, v13
diff --git a/llvm/test/CodeGen/AMDGPU/pr155452.ll b/llvm/test/CodeGen/AMDGPU/pr155452.ll
index d021b210c7f3a..d93203e8dee23 100644
--- a/llvm/test/CodeGen/AMDGPU/pr155452.ll
+++ b/llvm/test/CodeGen/AMDGPU/pr155452.ll
@@ -9,59 +9,59 @@ define amdgpu_kernel void @my_kernel(i64 %foo, i32 %bar) {
; CHECK-NEXT: s_mov_b32 flat_scratch_lo, s13
; CHECK-NEXT: s_add_i32 s12, s12, s17
; CHECK-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CHECK-NEXT: s_load_dword s0, s[8:9], 0x2
-; CHECK-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x0
-; CHECK-NEXT: s_mov_b64 s[4:5], 1
+; CHECK-NEXT: s_load_dword s5, s[8:9], 0x2
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; CHECK-NEXT: s_mov_b64 s[2:3], 1
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: s_ashr_i32 s6, s0, 31
-; CHECK-NEXT: s_abs_i32 s7, s0
-; CHECK-NEXT: v_cvt_f32_u32_e32 v2, s7
-; CHECK-NEXT: s_sub_i32 s0, 0, s7
+; CHECK-NEXT: s_ashr_i32 s4, s5, 31
+; CHECK-NEXT: s_abs_i32 s5, s5
+; CHECK-NEXT: v_cvt_f32_u32_e32 v2, s5
+; CHECK-NEXT: s_sub_i32 s6, 0, s5
; CHECK-NEXT: v_rcp_iflag_f32_e32 v2, v2
; CHECK-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
; CHECK-NEXT: v_cvt_u32_f32_e32 v2, v2
-; CHECK-NEXT: v_mul_lo_u32 v3, s0, v2
+; CHECK-NEXT: v_mul_lo_u32 v3, s6, v2
; CHECK-NEXT: v_mul_hi_u32 v3, v2, v3
; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT: s_and_b64 s[0:1], exec, -1
+; CHECK-NEXT: s_and_b64 vcc, exec, -1
; CHECK-NEXT: .LBB0_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_mov_b32_e32 v3, s4
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[8:9], s2, v3, 1
-; CHECK-NEXT: s_mul_i32 s4, s3, s4
-; CHECK-NEXT: s_mul_i32 s5, s2, s5
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, s4, v4
-; CHECK-NEXT: v_readfirstlane_b32 s4, v3
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, s5, v4
-; CHECK-NEXT: s_ashr_i32 s5, s4, 31
-; CHECK-NEXT: s_abs_i32 s8, s4
-; CHECK-NEXT: s_xor_b32 s5, s5, s6
+; CHECK-NEXT: v_mov_b32_e32 v3, s2
+; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[6:7], s0, v3, 1
+; CHECK-NEXT: s_mul_i32 s6, s1, s2
+; CHECK-NEXT: s_mul_i32 s3, s0, s3
+; CHECK-NEXT: v_readfirstlane_b32 s2, v3
+; CHECK-NEXT: v_readfirstlane_b32 s7, v4
+; CHECK-NEXT: s_add_i32 s6, s6, s7
+; CHECK-NEXT: s_ashr_i32 s7, s2, 31
+; CHECK-NEXT: s_abs_i32 s8, s2
+; CHECK-NEXT: s_add_i32 s3, s3, s6
+; CHECK-NEXT: s_xor_b32 s6, s7, s4
; CHECK-NEXT: v_mul_hi_u32 v3, s8, v2
-; CHECK-NEXT: v_readfirstlane_b32 s9, v3
-; CHECK-NEXT: v_add_i32_e32 v5, vcc, 1, v3
-; CHECK-NEXT: s_mul_i32 s9, s9, s7
+; CHECK-NEXT: v_readfirstlane_b32 s7, v3
+; CHECK-NEXT: s_mul_i32 s9, s7, s5
+; CHECK-NEXT: s_add_i32 s10, s7, 1
; CHECK-NEXT: s_sub_i32 s8, s8, s9
-; CHECK-NEXT: s_sub_i32 s9, s8, s7
-; CHECK-NEXT: s_cmp_ge_u32 s8, s7
-; CHECK-NEXT: s_cselect_b64 vcc, -1, 0
-; CHECK-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
+; CHECK-NEXT: s_sub_i32 s9, s8, s5
+; CHECK-NEXT: s_cmp_ge_u32 s8, s5
+; CHECK-NEXT: s_cselect_b32 s7, s10, s7
; CHECK-NEXT: s_cselect_b32 s8, s9, s8
-; CHECK-NEXT: v_add_i32_e32 v5, vcc, 1, v3
-; CHECK-NEXT: s_cmp_ge_u32 s8, s7
-; CHECK-NEXT: s_cselect_b64 vcc, -1, 0
-; CHECK-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
-; CHECK-NEXT: v_xor_b32_e32 v3, s5, v3
-; CHECK-NEXT: v_subrev_i32_e32 v3, vcc, s5, v3
-; CHECK-NEXT: v_ashrrev_i32_e32 v5, 31, v3
-; CHECK-NEXT: v_or_b32_e32 v3, s4, v3
-; CHECK-NEXT: v_or_b32_e32 v4, v4, v5
+; CHECK-NEXT: s_add_i32 s9, s7, 1
+; CHECK-NEXT: s_cmp_ge_u32 s8, s5
+; CHECK-NEXT: s_cselect_b32 s7, s9, s7
+; CHECK-NEXT: s_xor_b32 s7, s7, s6
+; CHECK-NEXT: s_sub_i32 s6, s7, s6
+; CHECK-NEXT: s_ashr_i32 s7, s6, 31
+; CHECK-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; CHECK-NEXT: v_mov_b32_e32 v4, s3
+; CHECK-NEXT: v_mov_b32_e32 v3, s2
; CHECK-NEXT: flat_load_dwordx2 v[3:4], v[3:4]
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[3:4]
-; CHECK-NEXT: s_mov_b64 s[4:5], 0
-; CHECK-NEXT: s_mov_b64 vcc, s[0:1]
+; CHECK-NEXT: s_mov_b64 s[2:3], 0
+; CHECK-NEXT: s_mov_b64 vcc, vcc
; CHECK-NEXT: s_cbranch_vccnz .LBB0_1
; CHECK-NEXT: ; %bb.2: ; %DummyReturnBlock
; CHECK-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv.ll b/llvm/test/CodeGen/AMDGPU/sdiv.ll
index 5c0f813c8c829..668c60c677b6b 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv.ll
@@ -25,36 +25,39 @@ define amdgpu_kernel void @sdiv_i32(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GCN-NEXT: s_mov_b32 s8, s6
; GCN-NEXT: s_mov_b32 s9, s7
; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; GCN-NEXT: s_mov_b32 s0, s4
; GCN-NEXT: s_mov_b32 s1, s5
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_sub_i32_e32 v2, vcc, 0, v1
-; GCN-NEXT: v_max_i32_e32 v2, v1, v2
-; GCN-NEXT: v_cvt_f32_u32_e32 v3, v2
-; GCN-NEXT: v_sub_i32_e32 v4, vcc, 0, v2
-; GCN-NEXT: v_sub_i32_e32 v5, vcc, 0, v0
-; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GCN-NEXT: v_max_i32_e32 v5, v0, v5
-; GCN-NEXT: v_xor_b32_e32 v0, v0, v1
-; GCN-NEXT: v_ashrrev_i32_e32 v0, 31, v0
-; GCN-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT: v_mul_lo_u32 v4, v4, v3
-; GCN-NEXT: v_mul_hi_u32 v4, v3, v4
-; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v4
-; GCN-NEXT: v_mul_hi_u32 v3, v5, v3
-; GCN-NEXT: v_mul_lo_u32 v1, v3, v2
-; GCN-NEXT: v_add_i32_e32 v4, vcc, 1, v3
-; GCN-NEXT: v_sub_i32_e32 v1, vcc, v5, v1
-; GCN-NEXT: v_sub_i32_e32 v5, vcc, v1, v2
-; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
-; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
-; GCN-NEXT: v_add_i32_e32 v4, vcc, 1, v3
-; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
-; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
-; GCN-NEXT: v_xor_b32_e32 v1, v1, v0
-; GCN-NEXT: v_sub_i32_e32 v0, vcc, v1, v0
+; GCN-NEXT: v_readfirstlane_b32 s6, v1
+; GCN-NEXT: s_abs_i32 s7, s6
+; GCN-NEXT: v_cvt_f32_u32_e32 v1, s7
+; GCN-NEXT: s_sub_i32 s0, 0, s7
+; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; GCN-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT: v_mul_lo_u32 v2, s0, v1
+; GCN-NEXT: s_mov_b32 s0, s4
+; GCN-NEXT: v_readfirstlane_b32 s4, v0
+; GCN-NEXT: s_abs_i32 s8, s4
+; GCN-NEXT: v_mul_hi_u32 v2, v1, v2
+; GCN-NEXT: s_xor_b32 s4, s4, s6
+; GCN-NEXT: s_ashr_i32 s4, s4, 31
+; GCN-NEXT: v_add_i32_e32 v0, vcc, v1, v2
+; GCN-NEXT: v_mul_hi_u32 v0, s8, v0
+; GCN-NEXT: v_readfirstlane_b32 s5, v0
+; GCN-NEXT: s_mul_i32 s5, s5, s7
+; GCN-NEXT: s_sub_i32 s5, s8, s5
+; GCN-NEXT: s_sub_i32 s6, s5, s7
+; GCN-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; GCN-NEXT: s_cmp_ge_u32 s5, s7
+; GCN-NEXT: s_cselect_b64 vcc, -1, 0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT: s_cselect_b32 s5, s6, s5
+; GCN-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; GCN-NEXT: s_cmp_ge_u32 s5, s7
+; GCN-NEXT: s_cselect_b64 vcc, -1, 0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT: v_xor_b32_e32 v0, s4, v0
+; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s4, v0
; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
;
@@ -69,36 +72,39 @@ define amdgpu_kernel void @sdiv_i32(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TONGA-NEXT: s_mov_b32 s8, s6
; TONGA-NEXT: s_mov_b32 s9, s7
; TONGA-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; TONGA-NEXT: s_mov_b32 s0, s4
; TONGA-NEXT: s_mov_b32 s1, s5
; TONGA-NEXT: s_waitcnt vmcnt(0)
-; TONGA-NEXT: v_sub_u32_e32 v2, vcc, 0, v1
-; TONGA-NEXT: v_max_i32_e32 v2, v1, v2
-; TONGA-NEXT: v_cvt_f32_u32_e32 v3, v2
-; TONGA-NEXT: v_sub_u32_e32 v4, vcc, 0, v2
-; TONGA-NEXT: v_sub_u32_e32 v5, vcc, 0, v0
-; TONGA-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; TONGA-NEXT: v_max_i32_e32 v5, v0, v5
-; TONGA-NEXT: v_xor_b32_e32 v0, v0, v1
-; TONGA-NEXT: v_ashrrev_i32_e32 v0, 31, v0
-; TONGA-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; TONGA-NEXT: v_cvt_u32_f32_e32 v3, v3
-; TONGA-NEXT: v_mul_lo_u32 v4, v4, v3
-; TONGA-NEXT: v_mul_hi_u32 v4, v3, v4
-; TONGA-NEXT: v_add_u32_e32 v3, vcc, v3, v4
-; TONGA-NEXT: v_mul_hi_u32 v3, v5, v3
-; TONGA-NEXT: v_mul_lo_u32 v1, v3, v2
-; TONGA-NEXT: v_add_u32_e32 v4, vcc, 1, v3
-; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v5, v1
-; TONGA-NEXT: v_sub_u32_e32 v5, vcc, v1, v2
-; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
-; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
-; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
-; TONGA-NEXT: v_add_u32_e32 v4, vcc, 1, v3
-; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2
-; TONGA-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
-; TONGA-NEXT: v_xor_b32_e32 v1, v1, v0
-; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v1, v0
+; TONGA-NEXT: v_readfirstlane_b32 s6, v1
+; TONGA-NEXT: s_abs_i32 s7, s6
+; TONGA-NEXT: v_cvt_f32_u32_e32 v1, s7
+; TONGA-NEXT: s_sub_i32 s0, 0, s7
+; TONGA-NEXT: v_rcp_iflag_f32_e32 v1, v1
+; TONGA-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; TONGA-NEXT: v_cvt_u32_f32_e32 v1, v1
+; TONGA-NEXT: v_mul_lo_u32 v2, s0, v1
+; TONGA-NEXT: s_mov_b32 s0, s4
+; TONGA-NEXT: v_readfirstlane_b32 s4, v0
+; TONGA-NEXT: s_abs_i32 s8, s4
+; TONGA-NEXT: v_mul_hi_u32 v2, v1, v2
+; TONGA-NEXT: s_xor_b32 s4, s4, s6
+; TONGA-NEXT: s_ashr_i32 s4, s4, 31
+; TONGA-NEXT: v_add_u32_e32 v0, vcc, v1, v2
+; TONGA-NEXT: v_mul_hi_u32 v0, s8, v0
+; TONGA-NEXT: v_readfirstlane_b32 s5, v0
+; TONGA-NEXT: s_mul_i32 s5, s5, s7
+; TONGA-NEXT: s_sub_i32 s5, s8, s5
+; TONGA-NEXT: s_sub_i32 s6, s5, s7
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; TONGA-NEXT: s_cmp_ge_u32 s5, s7
+; TONGA-NEXT: s_cselect_b64 vcc, -1, 0
+; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; TONGA-NEXT: s_cselect_b32 s5, s6, s5
+; TONGA-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; TONGA-NEXT: s_cmp_ge_u32 s5, s7
+; TONGA-NEXT: s_cselect_b64 vcc, -1, 0
+; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; TONGA-NEXT: v_xor_b32_e32 v0, s4, v0
+; TONGA-NEXT: v_subrev_u32_e32 v0, vcc, s4, v0
; TONGA-NEXT: buffer_store_dword v0, off, s[0:3], 0
; TONGA-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index 5944342b2642a..c50e55723b15a 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -4773,52 +4773,68 @@ define amdgpu_kernel void @srem_v2i64_4(ptr addrspace(1) %out, ptr addrspace(1)
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_load_dwordx4 v[0:3], v4, s[2:3]
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_ashrrev_i32_e32 v5, 31, v1
-; GCN-NEXT: v_lshrrev_b32_e32 v5, 30, v5
-; GCN-NEXT: v_ashrrev_i32_e32 v6, 31, v3
-; GCN-NEXT: v_add_co_u32_e32 v5, vcc, v0, v5
-; GCN-NEXT: v_lshrrev_b32_e32 v6, 30, v6
-; GCN-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v1, vcc
-; GCN-NEXT: v_add_co_u32_e32 v6, vcc, v2, v6
-; GCN-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
-; GCN-NEXT: v_and_b32_e32 v5, -4, v5
-; GCN-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v5
-; GCN-NEXT: v_and_b32_e32 v6, -4, v6
-; GCN-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v7, vcc
-; GCN-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v6
-; GCN-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v8, vcc
+; GCN-NEXT: v_readfirstlane_b32 s3, v1
+; GCN-NEXT: s_ashr_i32 s6, s3, 31
+; GCN-NEXT: v_readfirstlane_b32 s2, v0
+; GCN-NEXT: s_lshr_b32 s6, s6, 30
+; GCN-NEXT: s_add_u32 s6, s2, s6
+; GCN-NEXT: s_addc_u32 s7, s3, 0
+; GCN-NEXT: s_and_b32 s6, s6, -4
+; GCN-NEXT: v_readfirstlane_b32 s5, v3
+; GCN-NEXT: s_sub_u32 s2, s2, s6
+; GCN-NEXT: s_subb_u32 s3, s3, s7
+; GCN-NEXT: s_ashr_i32 s6, s5, 31
+; GCN-NEXT: v_readfirstlane_b32 s4, v2
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: s_lshr_b32 s2, s6, 30
+; GCN-NEXT: s_add_u32 s2, s4, s2
+; GCN-NEXT: v_mov_b32_e32 v1, s3
+; GCN-NEXT: s_addc_u32 s3, s5, 0
+; GCN-NEXT: s_and_b32 s2, s2, -4
+; GCN-NEXT: s_sub_u32 s2, s4, s2
+; GCN-NEXT: s_subb_u32 s3, s5, s3
+; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: v_mov_b32_e32 v3, s3
; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GCN-NEXT: s_endpgm
;
; TAHITI-LABEL: srem_v2i64_4:
; TAHITI: ; %bb.0:
-; TAHITI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; TAHITI-NEXT: s_mov_b32 s7, 0xf000
-; TAHITI-NEXT: s_mov_b32 s6, -1
-; TAHITI-NEXT: s_mov_b32 s10, s6
-; TAHITI-NEXT: s_mov_b32 s11, s7
+; TAHITI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; TAHITI-NEXT: s_mov_b32 s3, 0xf000
+; TAHITI-NEXT: s_mov_b32 s2, -1
+; TAHITI-NEXT: s_mov_b32 s10, s2
+; TAHITI-NEXT: s_mov_b32 s11, s3
; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
-; TAHITI-NEXT: s_mov_b32 s8, s2
-; TAHITI-NEXT: s_mov_b32 s9, s3
+; TAHITI-NEXT: s_mov_b32 s8, s6
+; TAHITI-NEXT: s_mov_b32 s9, s7
; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; TAHITI-NEXT: s_mov_b32 s4, s0
-; TAHITI-NEXT: s_mov_b32 s5, s1
+; TAHITI-NEXT: s_mov_b32 s1, s5
+; TAHITI-NEXT: s_mov_b32 s0, s4
; TAHITI-NEXT: s_waitcnt vmcnt(0)
-; TAHITI-NEXT: v_ashrrev_i32_e32 v4, 31, v1
-; TAHITI-NEXT: v_lshrrev_b32_e32 v4, 30, v4
-; TAHITI-NEXT: v_ashrrev_i32_e32 v5, 31, v3
-; TAHITI-NEXT: v_add_i32_e32 v4, vcc, v0, v4
-; TAHITI-NEXT: v_lshrrev_b32_e32 v5, 30, v5
-; TAHITI-NEXT: v_addc_u32_e32 v6, vcc, 0, v1, vcc
-; TAHITI-NEXT: v_add_i32_e32 v5, vcc, v2, v5
-; TAHITI-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; TAHITI-NEXT: v_and_b32_e32 v4, -4, v4
-; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
-; TAHITI-NEXT: v_and_b32_e32 v5, -4, v5
-; TAHITI-NEXT: v_subb_u32_e32 v1, vcc, v1, v6, vcc
-; TAHITI-NEXT: v_sub_i32_e32 v2, vcc, v2, v5
-; TAHITI-NEXT: v_subb_u32_e32 v3, vcc, v3, v7, vcc
-; TAHITI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; TAHITI-NEXT: v_readfirstlane_b32 s5, v1
+; TAHITI-NEXT: s_ashr_i32 s8, s5, 31
+; TAHITI-NEXT: v_readfirstlane_b32 s4, v0
+; TAHITI-NEXT: s_lshr_b32 s8, s8, 30
+; TAHITI-NEXT: s_add_u32 s8, s4, s8
+; TAHITI-NEXT: s_addc_u32 s9, s5, 0
+; TAHITI-NEXT: s_and_b32 s8, s8, -4
+; TAHITI-NEXT: v_readfirstlane_b32 s7, v3
+; TAHITI-NEXT: s_sub_u32 s4, s4, s8
+; TAHITI-NEXT: s_subb_u32 s5, s5, s9
+; TAHITI-NEXT: s_ashr_i32 s8, s7, 31
+; TAHITI-NEXT: v_readfirstlane_b32 s6, v2
+; TAHITI-NEXT: v_mov_b32_e32 v0, s4
+; TAHITI-NEXT: s_lshr_b32 s4, s8, 30
+; TAHITI-NEXT: s_add_u32 s4, s6, s4
+; TAHITI-NEXT: v_mov_b32_e32 v1, s5
+; TAHITI-NEXT: s_addc_u32 s5, s7, 0
+; TAHITI-NEXT: s_and_b32 s4, s4, -4
+; TAHITI-NEXT: s_sub_u32 s4, s6, s4
+; TAHITI-NEXT: s_subb_u32 s5, s7, s5
+; TAHITI-NEXT: v_mov_b32_e32 v2, s4
+; TAHITI-NEXT: v_mov_b32_e32 v3, s5
+; TAHITI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; TAHITI-NEXT: s_endpgm
;
; TONGA-LABEL: srem_v2i64_4:
@@ -4828,23 +4844,31 @@ define amdgpu_kernel void @srem_v2i64_4(ptr addrspace(1) %out, ptr addrspace(1)
; TONGA-NEXT: v_mov_b32_e32 v0, s2
; TONGA-NEXT: v_mov_b32_e32 v1, s3
; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
-; TONGA-NEXT: v_mov_b32_e32 v4, s0
; TONGA-NEXT: v_mov_b32_e32 v5, s1
+; TONGA-NEXT: v_mov_b32_e32 v4, s0
; TONGA-NEXT: s_waitcnt vmcnt(0)
-; TONGA-NEXT: v_ashrrev_i32_e32 v6, 31, v1
-; TONGA-NEXT: v_lshrrev_b32_e32 v6, 30, v6
-; TONGA-NEXT: v_ashrrev_i32_e32 v7, 31, v3
-; TONGA-NEXT: v_add_u32_e32 v6, vcc, v0, v6
-; TONGA-NEXT: v_lshrrev_b32_e32 v7, 30, v7
-; TONGA-NEXT: v_addc_u32_e32 v8, vcc, 0, v1, vcc
-; TONGA-NEXT: v_add_u32_e32 v7, vcc, v2, v7
-; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v3, vcc
-; TONGA-NEXT: v_and_b32_e32 v6, -4, v6
-; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v6
-; TONGA-NEXT: v_and_b32_e32 v7, -4, v7
-; TONGA-NEXT: v_subb_u32_e32 v1, vcc, v1, v8, vcc
-; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v7
-; TONGA-NEXT: v_subb_u32_e32 v3, vcc, v3, v9, vcc
+; TONGA-NEXT: v_readfirstlane_b32 s1, v1
+; TONGA-NEXT: s_ashr_i32 s4, s1, 31
+; TONGA-NEXT: v_readfirstlane_b32 s0, v0
+; TONGA-NEXT: s_lshr_b32 s4, s4, 30
+; TONGA-NEXT: s_add_u32 s4, s0, s4
+; TONGA-NEXT: s_addc_u32 s5, s1, 0
+; TONGA-NEXT: s_and_b32 s4, s4, -4
+; TONGA-NEXT: v_readfirstlane_b32 s3, v3
+; TONGA-NEXT: s_sub_u32 s0, s0, s4
+; TONGA-NEXT: s_subb_u32 s1, s1, s5
+; TONGA-NEXT: s_ashr_i32 s4, s3, 31
+; TONGA-NEXT: v_readfirstlane_b32 s2, v2
+; TONGA-NEXT: v_mov_b32_e32 v0, s0
+; TONGA-NEXT: s_lshr_b32 s0, s4, 30
+; TONGA-NEXT: s_add_u32 s0, s2, s0
+; TONGA-NEXT: v_mov_b32_e32 v1, s1
+; TONGA-NEXT: s_addc_u32 s1, s3, 0
+; TONGA-NEXT: s_and_b32 s0, s0, -4
+; TONGA-NEXT: s_sub_u32 s0, s2, s0
+; TONGA-NEXT: s_subb_u32 s1, s3, s1
+; TONGA-NEXT: v_mov_b32_e32 v2, s0
+; TONGA-NEXT: v_mov_b32_e32 v3, s1
; TONGA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; TONGA-NEXT: s_endpgm
;
@@ -9005,38 +9029,54 @@ define amdgpu_kernel void @srem_v4i64_4(ptr addrspace(1) %out, ptr addrspace(1)
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v8, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: global_load_dwordx4 v[0:3], v8, s[2:3]
-; GCN-NEXT: global_load_dwordx4 v[4:7], v8, s[2:3] offset:16
+; GCN-NEXT: global_load_dwordx4 v[0:3], v8, s[2:3] offset:16
+; GCN-NEXT: global_load_dwordx4 v[4:7], v8, s[2:3]
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_ashrrev_i32_e32 v9, 31, v1
-; GCN-NEXT: v_lshrrev_b32_e32 v9, 30, v9
-; GCN-NEXT: v_ashrrev_i32_e32 v10, 31, v3
-; GCN-NEXT: v_add_co_u32_e32 v9, vcc, v0, v9
-; GCN-NEXT: v_lshrrev_b32_e32 v10, 30, v10
-; GCN-NEXT: v_addc_co_u32_e32 v13, vcc, 0, v1, vcc
+; GCN-NEXT: v_readfirstlane_b32 s2, v0
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_ashrrev_i32_e32 v11, 31, v5
-; GCN-NEXT: v_add_co_u32_e32 v10, vcc, v2, v10
-; GCN-NEXT: v_lshrrev_b32_e32 v11, 30, v11
-; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, 0, v3, vcc
-; GCN-NEXT: v_ashrrev_i32_e32 v12, 31, v7
-; GCN-NEXT: v_add_co_u32_e32 v11, vcc, v4, v11
-; GCN-NEXT: v_lshrrev_b32_e32 v12, 30, v12
-; GCN-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v5, vcc
-; GCN-NEXT: v_add_co_u32_e32 v12, vcc, v6, v12
-; GCN-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v7, vcc
-; GCN-NEXT: v_and_b32_e32 v9, -4, v9
-; GCN-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v9
-; GCN-NEXT: v_and_b32_e32 v10, -4, v10
-; GCN-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v13, vcc
-; GCN-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v10
-; GCN-NEXT: v_and_b32_e32 v11, -4, v11
-; GCN-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v14, vcc
-; GCN-NEXT: v_sub_co_u32_e32 v4, vcc, v4, v11
-; GCN-NEXT: v_and_b32_e32 v12, -4, v12
-; GCN-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v15, vcc
-; GCN-NEXT: v_sub_co_u32_e32 v6, vcc, v6, v12
-; GCN-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v16, vcc
+; GCN-NEXT: v_readfirstlane_b32 s7, v5
+; GCN-NEXT: s_ashr_i32 s10, s7, 31
+; GCN-NEXT: v_readfirstlane_b32 s6, v4
+; GCN-NEXT: s_lshr_b32 s10, s10, 30
+; GCN-NEXT: s_add_u32 s10, s6, s10
+; GCN-NEXT: s_addc_u32 s11, s7, 0
+; GCN-NEXT: s_and_b32 s10, s10, -4
+; GCN-NEXT: v_readfirstlane_b32 s9, v7
+; GCN-NEXT: s_sub_u32 s6, s6, s10
+; GCN-NEXT: s_subb_u32 s7, s7, s11
+; GCN-NEXT: s_ashr_i32 s10, s9, 31
+; GCN-NEXT: v_readfirstlane_b32 s8, v6
+; GCN-NEXT: v_mov_b32_e32 v0, s6
+; GCN-NEXT: s_lshr_b32 s6, s10, 30
+; GCN-NEXT: s_add_u32 s6, s8, s6
+; GCN-NEXT: v_readfirstlane_b32 s3, v1
+; GCN-NEXT: v_mov_b32_e32 v1, s7
+; GCN-NEXT: s_addc_u32 s7, s9, 0
+; GCN-NEXT: s_and_b32 s6, s6, -4
+; GCN-NEXT: s_sub_u32 s6, s8, s6
+; GCN-NEXT: s_subb_u32 s7, s9, s7
+; GCN-NEXT: s_ashr_i32 s8, s3, 31
+; GCN-NEXT: v_readfirstlane_b32 s4, v2
+; GCN-NEXT: v_mov_b32_e32 v2, s6
+; GCN-NEXT: s_lshr_b32 s6, s8, 30
+; GCN-NEXT: s_add_u32 s6, s2, s6
+; GCN-NEXT: v_readfirstlane_b32 s5, v3
+; GCN-NEXT: v_mov_b32_e32 v3, s7
+; GCN-NEXT: s_addc_u32 s7, s3, 0
+; GCN-NEXT: s_and_b32 s6, s6, -4
+; GCN-NEXT: s_sub_u32 s2, s2, s6
+; GCN-NEXT: s_subb_u32 s3, s3, s7
+; GCN-NEXT: s_ashr_i32 s6, s5, 31
+; GCN-NEXT: v_mov_b32_e32 v4, s2
+; GCN-NEXT: s_lshr_b32 s2, s6, 30
+; GCN-NEXT: s_add_u32 s2, s4, s2
+; GCN-NEXT: v_mov_b32_e32 v5, s3
+; GCN-NEXT: s_addc_u32 s3, s5, 0
+; GCN-NEXT: s_and_b32 s2, s2, -4
+; GCN-NEXT: s_sub_u32 s2, s4, s2
+; GCN-NEXT: s_subb_u32 s3, s5, s3
+; GCN-NEXT: v_mov_b32_e32 v6, s2
+; GCN-NEXT: v_mov_b32_e32 v7, s3
; GCN-NEXT: global_store_dwordx4 v8, v[4:7], s[0:1] offset:16
; GCN-NEXT: global_store_dwordx4 v8, v[0:3], s[0:1]
; GCN-NEXT: s_endpgm
@@ -9051,40 +9091,56 @@ define amdgpu_kernel void @srem_v4i64_4(ptr addrspace(1) %out, ptr addrspace(1)
; TAHITI-NEXT: s_waitcnt lgkmcnt(0)
; TAHITI-NEXT: s_mov_b32 s8, s6
; TAHITI-NEXT: s_mov_b32 s9, s7
-; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; TAHITI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
+; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0 offset:16
+; TAHITI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0
; TAHITI-NEXT: s_mov_b32 s0, s4
; TAHITI-NEXT: s_mov_b32 s1, s5
; TAHITI-NEXT: s_waitcnt vmcnt(1)
-; TAHITI-NEXT: v_ashrrev_i32_e32 v8, 31, v1
-; TAHITI-NEXT: v_lshrrev_b32_e32 v8, 30, v8
-; TAHITI-NEXT: v_ashrrev_i32_e32 v9, 31, v3
-; TAHITI-NEXT: v_add_i32_e32 v8, vcc, v0, v8
-; TAHITI-NEXT: v_lshrrev_b32_e32 v9, 30, v9
-; TAHITI-NEXT: v_addc_u32_e32 v12, vcc, 0, v1, vcc
+; TAHITI-NEXT: v_readfirstlane_b32 s4, v0
; TAHITI-NEXT: s_waitcnt vmcnt(0)
-; TAHITI-NEXT: v_ashrrev_i32_e32 v10, 31, v5
-; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v2, v9
-; TAHITI-NEXT: v_lshrrev_b32_e32 v10, 30, v10
-; TAHITI-NEXT: v_addc_u32_e32 v13, vcc, 0, v3, vcc
-; TAHITI-NEXT: v_ashrrev_i32_e32 v11, 31, v7
-; TAHITI-NEXT: v_add_i32_e32 v10, vcc, v4, v10
-; TAHITI-NEXT: v_lshrrev_b32_e32 v11, 30, v11
-; TAHITI-NEXT: v_addc_u32_e32 v14, vcc, 0, v5, vcc
-; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v6, v11
-; TAHITI-NEXT: v_addc_u32_e32 v15, vcc, 0, v7, vcc
-; TAHITI-NEXT: v_and_b32_e32 v8, -4, v8
-; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
-; TAHITI-NEXT: v_and_b32_e32 v9, -4, v9
-; TAHITI-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
-; TAHITI-NEXT: v_sub_i32_e32 v2, vcc, v2, v9
-; TAHITI-NEXT: v_and_b32_e32 v10, -4, v10
-; TAHITI-NEXT: v_subb_u32_e32 v3, vcc, v3, v13, vcc
-; TAHITI-NEXT: v_sub_i32_e32 v4, vcc, v4, v10
-; TAHITI-NEXT: v_and_b32_e32 v11, -4, v11
-; TAHITI-NEXT: v_subb_u32_e32 v5, vcc, v5, v14, vcc
-; TAHITI-NEXT: v_sub_i32_e32 v6, vcc, v6, v11
-; TAHITI-NEXT: v_subb_u32_e32 v7, vcc, v7, v15, vcc
+; TAHITI-NEXT: v_readfirstlane_b32 s9, v5
+; TAHITI-NEXT: s_ashr_i32 s12, s9, 31
+; TAHITI-NEXT: v_readfirstlane_b32 s8, v4
+; TAHITI-NEXT: s_lshr_b32 s12, s12, 30
+; TAHITI-NEXT: s_add_u32 s12, s8, s12
+; TAHITI-NEXT: s_addc_u32 s13, s9, 0
+; TAHITI-NEXT: s_and_b32 s12, s12, -4
+; TAHITI-NEXT: v_readfirstlane_b32 s11, v7
+; TAHITI-NEXT: s_sub_u32 s8, s8, s12
+; TAHITI-NEXT: s_subb_u32 s9, s9, s13
+; TAHITI-NEXT: s_ashr_i32 s12, s11, 31
+; TAHITI-NEXT: v_readfirstlane_b32 s10, v6
+; TAHITI-NEXT: v_mov_b32_e32 v0, s8
+; TAHITI-NEXT: s_lshr_b32 s8, s12, 30
+; TAHITI-NEXT: s_add_u32 s8, s10, s8
+; TAHITI-NEXT: v_readfirstlane_b32 s5, v1
+; TAHITI-NEXT: v_mov_b32_e32 v1, s9
+; TAHITI-NEXT: s_addc_u32 s9, s11, 0
+; TAHITI-NEXT: s_and_b32 s8, s8, -4
+; TAHITI-NEXT: s_sub_u32 s8, s10, s8
+; TAHITI-NEXT: s_subb_u32 s9, s11, s9
+; TAHITI-NEXT: s_ashr_i32 s10, s5, 31
+; TAHITI-NEXT: v_readfirstlane_b32 s6, v2
+; TAHITI-NEXT: v_mov_b32_e32 v2, s8
+; TAHITI-NEXT: s_lshr_b32 s8, s10, 30
+; TAHITI-NEXT: s_add_u32 s8, s4, s8
+; TAHITI-NEXT: v_readfirstlane_b32 s7, v3
+; TAHITI-NEXT: v_mov_b32_e32 v3, s9
+; TAHITI-NEXT: s_addc_u32 s9, s5, 0
+; TAHITI-NEXT: s_and_b32 s8, s8, -4
+; TAHITI-NEXT: s_sub_u32 s4, s4, s8
+; TAHITI-NEXT: s_subb_u32 s5, s5, s9
+; TAHITI-NEXT: s_ashr_i32 s8, s7, 31
+; TAHITI-NEXT: v_mov_b32_e32 v4, s4
+; TAHITI-NEXT: s_lshr_b32 s4, s8, 30
+; TAHITI-NEXT: s_add_u32 s4, s6, s4
+; TAHITI-NEXT: v_mov_b32_e32 v5, s5
+; TAHITI-NEXT: s_addc_u32 s5, s7, 0
+; TAHITI-NEXT: s_and_b32 s4, s4, -4
+; TAHITI-NEXT: s_sub_u32 s4, s6, s4
+; TAHITI-NEXT: s_subb_u32 s5, s7, s5
+; TAHITI-NEXT: v_mov_b32_e32 v6, s4
+; TAHITI-NEXT: v_mov_b32_e32 v7, s5
; TAHITI-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
; TAHITI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; TAHITI-NEXT: s_endpgm
@@ -9093,52 +9149,69 @@ define amdgpu_kernel void @srem_v4i64_4(ptr addrspace(1) %out, ptr addrspace(1)
; TONGA: ; %bb.0:
; TONGA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; TONGA-NEXT: s_waitcnt lgkmcnt(0)
-; TONGA-NEXT: v_mov_b32_e32 v0, s2
-; TONGA-NEXT: v_mov_b32_e32 v1, s3
-; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; TONGA-NEXT: s_add_u32 s4, s2, 16
; TONGA-NEXT: s_addc_u32 s5, s3, 0
-; TONGA-NEXT: v_mov_b32_e32 v4, s4
-; TONGA-NEXT: v_mov_b32_e32 v5, s5
+; TONGA-NEXT: v_mov_b32_e32 v0, s4
+; TONGA-NEXT: v_mov_b32_e32 v5, s3
+; TONGA-NEXT: v_mov_b32_e32 v1, s5
+; TONGA-NEXT: v_mov_b32_e32 v4, s2
+; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; TONGA-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; TONGA-NEXT: v_mov_b32_e32 v9, s1
; TONGA-NEXT: v_mov_b32_e32 v8, s0
-; TONGA-NEXT: s_add_u32 s0, s0, 16
-; TONGA-NEXT: s_addc_u32 s1, s1, 0
-; TONGA-NEXT: v_mov_b32_e32 v11, s1
-; TONGA-NEXT: v_mov_b32_e32 v10, s0
; TONGA-NEXT: s_waitcnt vmcnt(1)
-; TONGA-NEXT: v_ashrrev_i32_e32 v12, 31, v1
-; TONGA-NEXT: v_lshrrev_b32_e32 v12, 30, v12
-; TONGA-NEXT: v_add_u32_e32 v12, vcc, v0, v12
-; TONGA-NEXT: v_addc_u32_e32 v16, vcc, 0, v1, vcc
-; TONGA-NEXT: v_and_b32_e32 v12, -4, v12
-; TONGA-NEXT: v_ashrrev_i32_e32 v13, 31, v3
-; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v12
-; TONGA-NEXT: v_lshrrev_b32_e32 v13, 30, v13
-; TONGA-NEXT: v_subb_u32_e32 v1, vcc, v1, v16, vcc
-; TONGA-NEXT: v_add_u32_e32 v12, vcc, v2, v13
-; TONGA-NEXT: v_addc_u32_e32 v13, vcc, 0, v3, vcc
-; TONGA-NEXT: v_and_b32_e32 v12, -4, v12
+; TONGA-NEXT: v_readfirstlane_b32 s2, v0
; TONGA-NEXT: s_waitcnt vmcnt(0)
-; TONGA-NEXT: v_ashrrev_i32_e32 v14, 31, v5
-; TONGA-NEXT: v_ashrrev_i32_e32 v15, 31, v7
-; TONGA-NEXT: v_sub_u32_e32 v2, vcc, v2, v12
-; TONGA-NEXT: v_lshrrev_b32_e32 v14, 30, v14
-; TONGA-NEXT: v_lshrrev_b32_e32 v15, 30, v15
-; TONGA-NEXT: v_subb_u32_e32 v3, vcc, v3, v13, vcc
-; TONGA-NEXT: v_add_u32_e64 v12, s[0:1], v4, v14
-; TONGA-NEXT: v_add_u32_e32 v13, vcc, v6, v15
-; TONGA-NEXT: v_addc_u32_e32 v15, vcc, 0, v7, vcc
-; TONGA-NEXT: v_and_b32_e32 v12, -4, v12
-; TONGA-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v5, s[0:1]
-; TONGA-NEXT: v_sub_u32_e32 v4, vcc, v4, v12
-; TONGA-NEXT: v_and_b32_e32 v13, -4, v13
-; TONGA-NEXT: v_subb_u32_e32 v5, vcc, v5, v14, vcc
-; TONGA-NEXT: v_sub_u32_e32 v6, vcc, v6, v13
-; TONGA-NEXT: v_subb_u32_e32 v7, vcc, v7, v15, vcc
+; TONGA-NEXT: v_readfirstlane_b32 s7, v5
+; TONGA-NEXT: s_ashr_i32 s10, s7, 31
+; TONGA-NEXT: v_readfirstlane_b32 s6, v4
+; TONGA-NEXT: s_lshr_b32 s10, s10, 30
+; TONGA-NEXT: s_add_u32 s10, s6, s10
+; TONGA-NEXT: s_addc_u32 s11, s7, 0
+; TONGA-NEXT: s_and_b32 s10, s10, -4
+; TONGA-NEXT: v_readfirstlane_b32 s9, v7
+; TONGA-NEXT: s_sub_u32 s6, s6, s10
+; TONGA-NEXT: s_subb_u32 s7, s7, s11
+; TONGA-NEXT: s_ashr_i32 s10, s9, 31
+; TONGA-NEXT: v_readfirstlane_b32 s8, v6
+; TONGA-NEXT: v_mov_b32_e32 v0, s6
+; TONGA-NEXT: s_lshr_b32 s6, s10, 30
+; TONGA-NEXT: s_add_u32 s6, s8, s6
+; TONGA-NEXT: v_readfirstlane_b32 s3, v1
+; TONGA-NEXT: v_mov_b32_e32 v1, s7
+; TONGA-NEXT: s_addc_u32 s7, s9, 0
+; TONGA-NEXT: s_and_b32 s6, s6, -4
+; TONGA-NEXT: s_sub_u32 s6, s8, s6
+; TONGA-NEXT: s_subb_u32 s7, s9, s7
+; TONGA-NEXT: s_ashr_i32 s8, s3, 31
+; TONGA-NEXT: v_readfirstlane_b32 s4, v2
+; TONGA-NEXT: v_mov_b32_e32 v2, s6
+; TONGA-NEXT: s_lshr_b32 s6, s8, 30
+; TONGA-NEXT: s_add_u32 s6, s2, s6
+; TONGA-NEXT: v_readfirstlane_b32 s5, v3
+; TONGA-NEXT: v_mov_b32_e32 v3, s7
+; TONGA-NEXT: s_addc_u32 s7, s3, 0
+; TONGA-NEXT: s_and_b32 s6, s6, -4
+; TONGA-NEXT: s_sub_u32 s2, s2, s6
+; TONGA-NEXT: s_subb_u32 s3, s3, s7
+; TONGA-NEXT: s_ashr_i32 s6, s5, 31
; TONGA-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
-; TONGA-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
+; TONGA-NEXT: s_nop 0
+; TONGA-NEXT: v_mov_b32_e32 v0, s2
+; TONGA-NEXT: s_lshr_b32 s2, s6, 30
+; TONGA-NEXT: s_add_u32 s2, s4, s2
+; TONGA-NEXT: v_mov_b32_e32 v1, s3
+; TONGA-NEXT: s_addc_u32 s3, s5, 0
+; TONGA-NEXT: s_and_b32 s2, s2, -4
+; TONGA-NEXT: s_sub_u32 s2, s4, s2
+; TONGA-NEXT: s_subb_u32 s3, s5, s3
+; TONGA-NEXT: s_add_u32 s0, s0, 16
+; TONGA-NEXT: s_addc_u32 s1, s1, 0
+; TONGA-NEXT: v_mov_b32_e32 v5, s1
+; TONGA-NEXT: v_mov_b32_e32 v2, s2
+; TONGA-NEXT: v_mov_b32_e32 v3, s3
+; TONGA-NEXT: v_mov_b32_e32 v4, s0
+; TONGA-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; TONGA-NEXT: s_endpgm
;
; EG-LABEL: srem_v4i64_4:
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll
index b5d9d00c48045..ed2f06b8136a2 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll
@@ -3963,8 +3963,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
; GFX7-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5]
-; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX7-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -4067,8 +4067,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
; GFX8-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5]
-; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX8-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -4175,8 +4175,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
; GFX9-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5]
; GFX9-SDAG-NEXT: s_nop 1
-; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-SDAG-NEXT: v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
; GFX9-SDAG-NEXT: s_nop 1
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
@@ -4283,8 +4283,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
; GFX10-SDAG-NEXT: v_cmp_gt_i64_e64 s4, v[2:3], v[6:7]
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
; GFX10-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4387,8 +4387,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
; GFX11-SDAG-NEXT: v_cmp_gt_i64_e64 s0, v[2:3], v[6:7]
; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX11-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -4502,8 +4502,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
; GFX12-SDAG-NEXT: s_wait_alu 0xf1ff
; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX12-SDAG-NEXT: v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
; GFX12-SDAG-NEXT: s_wait_alu 0xfffd
; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll
index 2a989ecd2ebad..8812cae20f110 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll
@@ -3963,8 +3963,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
; GFX7-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5]
-; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX7-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -4067,8 +4067,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
; GFX8-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5]
-; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX8-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -4175,8 +4175,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
; GFX9-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5]
; GFX9-SDAG-NEXT: s_nop 1
-; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-SDAG-NEXT: v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
; GFX9-SDAG-NEXT: s_nop 1
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
@@ -4283,8 +4283,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
; GFX10-SDAG-NEXT: v_cmp_lt_i64_e64 s4, v[2:3], v[6:7]
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
; GFX10-SDAG-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4387,8 +4387,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
; GFX11-SDAG-NEXT: v_cmp_lt_i64_e64 s0, v[2:3], v[6:7]
; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX11-SDAG-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -4502,8 +4502,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
; GFX12-SDAG-NEXT: s_wait_alu 0xf1ff
; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX12-SDAG-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
; GFX12-SDAG-NEXT: s_wait_alu 0xfffd
; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll
index 69fd58aadfbcc..82eb122f9f703 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll
@@ -3839,8 +3839,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
; GFX7-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX7-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -3943,8 +3943,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
; GFX8-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX8-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -4051,8 +4051,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
; GFX9-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
; GFX9-SDAG-NEXT: s_nop 1
-; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-SDAG-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-SDAG-NEXT: s_nop 1
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
@@ -4159,8 +4159,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
; GFX10-SDAG-NEXT: v_cmp_gt_u64_e64 s4, v[2:3], v[6:7]
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
; GFX10-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4263,8 +4263,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
; GFX11-SDAG-NEXT: v_cmp_gt_u64_e64 s0, v[2:3], v[6:7]
; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX11-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -4378,8 +4378,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
; GFX12-SDAG-NEXT: s_wait_alu 0xf1ff
; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX12-SDAG-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
; GFX12-SDAG-NEXT: s_wait_alu 0xfffd
; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll
index 1d3b42ee43b0f..7b304b2c81e2f 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll
@@ -3575,8 +3575,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
; GFX7-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX7-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX7-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -3679,8 +3679,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
; GFX8-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX8-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX8-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -3787,8 +3787,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
; GFX9-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5]
; GFX9-SDAG-NEXT: s_nop 1
-; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
; GFX9-SDAG-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-SDAG-NEXT: s_nop 1
; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
@@ -3895,8 +3895,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
; GFX10-SDAG-NEXT: v_cmp_lt_u64_e64 s4, v[2:3], v[6:7]
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s4
; GFX10-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
; GFX10-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -3999,8 +3999,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
; GFX11-SDAG-NEXT: v_cmp_lt_u64_e64 s0, v[2:3], v[6:7]
; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX11-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
; GFX11-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -4114,8 +4114,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
; GFX12-SDAG-NEXT: s_wait_alu 0xf1ff
; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s0
; GFX12-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
; GFX12-SDAG-NEXT: s_wait_alu 0xfffd
; GFX12-SDAG-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
diff --git a/llvm/test/CodeGen/RISCV/abds.ll b/llvm/test/CodeGen/RISCV/abds.ll
index f11a9c854c465..6fdd9e44fd200 100644
--- a/llvm/test/CodeGen/RISCV/abds.ll
+++ b/llvm/test/CodeGen/RISCV/abds.ll
@@ -2013,48 +2013,48 @@ define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 0(a2)
; RV32I-NEXT: lw a4, 4(a2)
-; RV32I-NEXT: lw a5, 8(a2)
-; RV32I-NEXT: lw a2, 12(a2)
-; RV32I-NEXT: lw a7, 8(a1)
+; RV32I-NEXT: lw a6, 8(a2)
+; RV32I-NEXT: lw a7, 12(a2)
+; RV32I-NEXT: lw a2, 8(a1)
; RV32I-NEXT: lw t0, 12(a1)
-; RV32I-NEXT: lw a6, 0(a1)
+; RV32I-NEXT: lw a5, 0(a1)
; RV32I-NEXT: lw a1, 4(a1)
-; RV32I-NEXT: sltu t1, a7, a5
-; RV32I-NEXT: sub t0, t0, a2
-; RV32I-NEXT: sltu a2, a6, a3
-; RV32I-NEXT: sub t0, t0, t1
-; RV32I-NEXT: mv t1, a2
+; RV32I-NEXT: sltu t1, a2, a6
+; RV32I-NEXT: sub a7, t0, a7
+; RV32I-NEXT: sltu t0, a5, a3
+; RV32I-NEXT: sub a7, a7, t1
+; RV32I-NEXT: mv t1, t0
; RV32I-NEXT: beq a1, a4, .LBB31_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu t1, a1, a4
; RV32I-NEXT: .LBB31_2:
-; RV32I-NEXT: sub a5, a7, a5
+; RV32I-NEXT: sub a6, a2, a6
; RV32I-NEXT: sub a1, a1, a4
-; RV32I-NEXT: sltu a4, a5, t1
-; RV32I-NEXT: sub a5, a5, t1
-; RV32I-NEXT: sub a4, t0, a4
-; RV32I-NEXT: sub a2, a1, a2
-; RV32I-NEXT: sub a1, a6, a3
-; RV32I-NEXT: bgez a4, .LBB31_4
+; RV32I-NEXT: sltu a2, a6, t1
+; RV32I-NEXT: sub a1, a1, t0
+; RV32I-NEXT: sub a2, a7, a2
+; RV32I-NEXT: sub a4, a6, t1
+; RV32I-NEXT: sub a3, a5, a3
+; RV32I-NEXT: bgez a2, .LBB31_4
; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: snez a3, a2
-; RV32I-NEXT: snez a6, a1
-; RV32I-NEXT: neg a7, a5
-; RV32I-NEXT: snez a5, a5
-; RV32I-NEXT: or a3, a6, a3
-; RV32I-NEXT: add a4, a4, a5
-; RV32I-NEXT: add a2, a2, a6
-; RV32I-NEXT: sltu a6, a7, a3
-; RV32I-NEXT: neg a4, a4
-; RV32I-NEXT: sub a5, a7, a3
+; RV32I-NEXT: neg a5, a4
+; RV32I-NEXT: or a6, a3, a1
+; RV32I-NEXT: snez a4, a4
+; RV32I-NEXT: snez a7, a3
+; RV32I-NEXT: snez a6, a6
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: add a1, a1, a7
+; RV32I-NEXT: sltu a7, a5, a6
; RV32I-NEXT: neg a2, a2
-; RV32I-NEXT: sub a4, a4, a6
+; RV32I-NEXT: sub a4, a5, a6
; RV32I-NEXT: neg a1, a1
+; RV32I-NEXT: sub a2, a2, a7
+; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: .LBB31_4:
-; RV32I-NEXT: sw a1, 0(a0)
-; RV32I-NEXT: sw a2, 4(a0)
-; RV32I-NEXT: sw a5, 8(a0)
-; RV32I-NEXT: sw a4, 12(a0)
+; RV32I-NEXT: sw a3, 0(a0)
+; RV32I-NEXT: sw a1, 4(a0)
+; RV32I-NEXT: sw a4, 8(a0)
+; RV32I-NEXT: sw a2, 12(a0)
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i128:
@@ -2076,48 +2076,48 @@ define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a3, 0(a2)
; RV32ZBB-NEXT: lw a4, 4(a2)
-; RV32ZBB-NEXT: lw a5, 8(a2)
-; RV32ZBB-NEXT: lw a2, 12(a2)
-; RV32ZBB-NEXT: lw a7, 8(a1)
+; RV32ZBB-NEXT: lw a6, 8(a2)
+; RV32ZBB-NEXT: lw a7, 12(a2)
+; RV32ZBB-NEXT: lw a2, 8(a1)
; RV32ZBB-NEXT: lw t0, 12(a1)
-; RV32ZBB-NEXT: lw a6, 0(a1)
+; RV32ZBB-NEXT: lw a5, 0(a1)
; RV32ZBB-NEXT: lw a1, 4(a1)
-; RV32ZBB-NEXT: sltu t1, a7, a5
-; RV32ZBB-NEXT: sub t0, t0, a2
-; RV32ZBB-NEXT: sltu a2, a6, a3
-; RV32ZBB-NEXT: sub t0, t0, t1
-; RV32ZBB-NEXT: mv t1, a2
+; RV32ZBB-NEXT: sltu t1, a2, a6
+; RV32ZBB-NEXT: sub a7, t0, a7
+; RV32ZBB-NEXT: sltu t0, a5, a3
+; RV32ZBB-NEXT: sub a7, a7, t1
+; RV32ZBB-NEXT: mv t1, t0
; RV32ZBB-NEXT: beq a1, a4, .LBB31_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: sltu t1, a1, a4
; RV32ZBB-NEXT: .LBB31_2:
-; RV32ZBB-NEXT: sub a5, a7, a5
+; RV32ZBB-NEXT: sub a6, a2, a6
; RV32ZBB-NEXT: sub a1, a1, a4
-; RV32ZBB-NEXT: sltu a4, a5, t1
-; RV32ZBB-NEXT: sub a5, a5, t1
-; RV32ZBB-NEXT: sub a4, t0, a4
-; RV32ZBB-NEXT: sub a2, a1, a2
-; RV32ZBB-NEXT: sub a1, a6, a3
-; RV32ZBB-NEXT: bgez a4, .LBB31_4
+; RV32ZBB-NEXT: sltu a2, a6, t1
+; RV32ZBB-NEXT: sub a1, a1, t0
+; RV32ZBB-NEXT: sub a2, a7, a2
+; RV32ZBB-NEXT: sub a4, a6, t1
+; RV32ZBB-NEXT: sub a3, a5, a3
+; RV32ZBB-NEXT: bgez a2, .LBB31_4
; RV32ZBB-NEXT: # %bb.3:
-; RV32ZBB-NEXT: snez a3, a2
-; RV32ZBB-NEXT: snez a6, a1
-; RV32ZBB-NEXT: neg a7, a5
-; RV32ZBB-NEXT: snez a5, a5
-; RV32ZBB-NEXT: or a3, a6, a3
-; RV32ZBB-NEXT: add a4, a4, a5
-; RV32ZBB-NEXT: add a2, a2, a6
-; RV32ZBB-NEXT: sltu a6, a7, a3
-; RV32ZBB-NEXT: neg a4, a4
-; RV32ZBB-NEXT: sub a5, a7, a3
+; RV32ZBB-NEXT: neg a5, a4
+; RV32ZBB-NEXT: or a6, a3, a1
+; RV32ZBB-NEXT: snez a4, a4
+; RV32ZBB-NEXT: snez a7, a3
+; RV32ZBB-NEXT: snez a6, a6
+; RV32ZBB-NEXT: add a2, a2, a4
+; RV32ZBB-NEXT: add a1, a1, a7
+; RV32ZBB-NEXT: sltu a7, a5, a6
; RV32ZBB-NEXT: neg a2, a2
-; RV32ZBB-NEXT: sub a4, a4, a6
+; RV32ZBB-NEXT: sub a4, a5, a6
; RV32ZBB-NEXT: neg a1, a1
+; RV32ZBB-NEXT: sub a2, a2, a7
+; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: .LBB31_4:
-; RV32ZBB-NEXT: sw a1, 0(a0)
-; RV32ZBB-NEXT: sw a2, 4(a0)
-; RV32ZBB-NEXT: sw a5, 8(a0)
-; RV32ZBB-NEXT: sw a4, 12(a0)
+; RV32ZBB-NEXT: sw a3, 0(a0)
+; RV32ZBB-NEXT: sw a1, 4(a0)
+; RV32ZBB-NEXT: sw a4, 8(a0)
+; RV32ZBB-NEXT: sw a2, 12(a0)
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i128:
@@ -2144,48 +2144,48 @@ define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 0(a2)
; RV32I-NEXT: lw a4, 4(a2)
-; RV32I-NEXT: lw a5, 8(a2)
-; RV32I-NEXT: lw a2, 12(a2)
-; RV32I-NEXT: lw a7, 8(a1)
+; RV32I-NEXT: lw a6, 8(a2)
+; RV32I-NEXT: lw a7, 12(a2)
+; RV32I-NEXT: lw a2, 8(a1)
; RV32I-NEXT: lw t0, 12(a1)
-; RV32I-NEXT: lw a6, 0(a1)
+; RV32I-NEXT: lw a5, 0(a1)
; RV32I-NEXT: lw a1, 4(a1)
-; RV32I-NEXT: sltu t1, a7, a5
-; RV32I-NEXT: sub t0, t0, a2
-; RV32I-NEXT: sltu a2, a6, a3
-; RV32I-NEXT: sub t0, t0, t1
-; RV32I-NEXT: mv t1, a2
+; RV32I-NEXT: sltu t1, a2, a6
+; RV32I-NEXT: sub a7, t0, a7
+; RV32I-NEXT: sltu t0, a5, a3
+; RV32I-NEXT: sub a7, a7, t1
+; RV32I-NEXT: mv t1, t0
; RV32I-NEXT: beq a1, a4, .LBB32_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu t1, a1, a4
; RV32I-NEXT: .LBB32_2:
-; RV32I-NEXT: sub a5, a7, a5
+; RV32I-NEXT: sub a6, a2, a6
; RV32I-NEXT: sub a1, a1, a4
-; RV32I-NEXT: sltu a4, a5, t1
-; RV32I-NEXT: sub a5, a5, t1
-; RV32I-NEXT: sub a4, t0, a4
-; RV32I-NEXT: sub a2, a1, a2
-; RV32I-NEXT: sub a1, a6, a3
-; RV32I-NEXT: bgez a4, .LBB32_4
+; RV32I-NEXT: sltu a2, a6, t1
+; RV32I-NEXT: sub a1, a1, t0
+; RV32I-NEXT: sub a2, a7, a2
+; RV32I-NEXT: sub a4, a6, t1
+; RV32I-NEXT: sub a3, a5, a3
+; RV32I-NEXT: bgez a2, .LBB32_4
; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: snez a3, a2
-; RV32I-NEXT: snez a6, a1
-; RV32I-NEXT: neg a7, a5
-; RV32I-NEXT: snez a5, a5
-; RV32I-NEXT: or a3, a6, a3
-; RV32I-NEXT: add a4, a4, a5
-; RV32I-NEXT: add a2, a2, a6
-; RV32I-NEXT: sltu a6, a7, a3
-; RV32I-NEXT: neg a4, a4
-; RV32I-NEXT: sub a5, a7, a3
+; RV32I-NEXT: neg a5, a4
+; RV32I-NEXT: or a6, a3, a1
+; RV32I-NEXT: snez a4, a4
+; RV32I-NEXT: snez a7, a3
+; RV32I-NEXT: snez a6, a6
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: add a1, a1, a7
+; RV32I-NEXT: sltu a7, a5, a6
; RV32I-NEXT: neg a2, a2
-; RV32I-NEXT: sub a4, a4, a6
+; RV32I-NEXT: sub a4, a5, a6
; RV32I-NEXT: neg a1, a1
+; RV32I-NEXT: sub a2, a2, a7
+; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: .LBB32_4:
-; RV32I-NEXT: sw a1, 0(a0)
-; RV32I-NEXT: sw a2, 4(a0)
-; RV32I-NEXT: sw a5, 8(a0)
-; RV32I-NEXT: sw a4, 12(a0)
+; RV32I-NEXT: sw a3, 0(a0)
+; RV32I-NEXT: sw a1, 4(a0)
+; RV32I-NEXT: sw a4, 8(a0)
+; RV32I-NEXT: sw a2, 12(a0)
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i128_undef:
@@ -2207,48 +2207,48 @@ define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a3, 0(a2)
; RV32ZBB-NEXT: lw a4, 4(a2)
-; RV32ZBB-NEXT: lw a5, 8(a2)
-; RV32ZBB-NEXT: lw a2, 12(a2)
-; RV32ZBB-NEXT: lw a7, 8(a1)
+; RV32ZBB-NEXT: lw a6, 8(a2)
+; RV32ZBB-NEXT: lw a7, 12(a2)
+; RV32ZBB-NEXT: lw a2, 8(a1)
; RV32ZBB-NEXT: lw t0, 12(a1)
-; RV32ZBB-NEXT: lw a6, 0(a1)
+; RV32ZBB-NEXT: lw a5, 0(a1)
; RV32ZBB-NEXT: lw a1, 4(a1)
-; RV32ZBB-NEXT: sltu t1, a7, a5
-; RV32ZBB-NEXT: sub t0, t0, a2
-; RV32ZBB-NEXT: sltu a2, a6, a3
-; RV32ZBB-NEXT: sub t0, t0, t1
-; RV32ZBB-NEXT: mv t1, a2
+; RV32ZBB-NEXT: sltu t1, a2, a6
+; RV32ZBB-NEXT: sub a7, t0, a7
+; RV32ZBB-NEXT: sltu t0, a5, a3
+; RV32ZBB-NEXT: sub a7, a7, t1
+; RV32ZBB-NEXT: mv t1, t0
; RV32ZBB-NEXT: beq a1, a4, .LBB32_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: sltu t1, a1, a4
; RV32ZBB-NEXT: .LBB32_2:
-; RV32ZBB-NEXT: sub a5, a7, a5
+; RV32ZBB-NEXT: sub a6, a2, a6
; RV32ZBB-NEXT: sub a1, a1, a4
-; RV32ZBB-NEXT: sltu a4, a5, t1
-; RV32ZBB-NEXT: sub a5, a5, t1
-; RV32ZBB-NEXT: sub a4, t0, a4
-; RV32ZBB-NEXT: sub a2, a1, a2
-; RV32ZBB-NEXT: sub a1, a6, a3
-; RV32ZBB-NEXT: bgez a4, .LBB32_4
+; RV32ZBB-NEXT: sltu a2, a6, t1
+; RV32ZBB-NEXT: sub a1, a1, t0
+; RV32ZBB-NEXT: sub a2, a7, a2
+; RV32ZBB-NEXT: sub a4, a6, t1
+; RV32ZBB-NEXT: sub a3, a5, a3
+; RV32ZBB-NEXT: bgez a2, .LBB32_4
; RV32ZBB-NEXT: # %bb.3:
-; RV32ZBB-NEXT: snez a3, a2
-; RV32ZBB-NEXT: snez a6, a1
-; RV32ZBB-NEXT: neg a7, a5
-; RV32ZBB-NEXT: snez a5, a5
-; RV32ZBB-NEXT: or a3, a6, a3
-; RV32ZBB-NEXT: add a4, a4, a5
-; RV32ZBB-NEXT: add a2, a2, a6
-; RV32ZBB-NEXT: sltu a6, a7, a3
-; RV32ZBB-NEXT: neg a4, a4
-; RV32ZBB-NEXT: sub a5, a7, a3
+; RV32ZBB-NEXT: neg a5, a4
+; RV32ZBB-NEXT: or a6, a3, a1
+; RV32ZBB-NEXT: snez a4, a4
+; RV32ZBB-NEXT: snez a7, a3
+; RV32ZBB-NEXT: snez a6, a6
+; RV32ZBB-NEXT: add a2, a2, a4
+; RV32ZBB-NEXT: add a1, a1, a7
+; RV32ZBB-NEXT: sltu a7, a5, a6
; RV32ZBB-NEXT: neg a2, a2
-; RV32ZBB-NEXT: sub a4, a4, a6
+; RV32ZBB-NEXT: sub a4, a5, a6
; RV32ZBB-NEXT: neg a1, a1
+; RV32ZBB-NEXT: sub a2, a2, a7
+; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: .LBB32_4:
-; RV32ZBB-NEXT: sw a1, 0(a0)
-; RV32ZBB-NEXT: sw a2, 4(a0)
-; RV32ZBB-NEXT: sw a5, 8(a0)
-; RV32ZBB-NEXT: sw a4, 12(a0)
+; RV32ZBB-NEXT: sw a3, 0(a0)
+; RV32ZBB-NEXT: sw a1, 4(a0)
+; RV32ZBB-NEXT: sw a4, 8(a0)
+; RV32ZBB-NEXT: sw a2, 12(a0)
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i128_undef:
diff --git a/llvm/test/CodeGen/RISCV/abdu-neg.ll b/llvm/test/CodeGen/RISCV/abdu-neg.ll
index 713b52f53e3d9..405c0b905ca16 100644
--- a/llvm/test/CodeGen/RISCV/abdu-neg.ll
+++ b/llvm/test/CodeGen/RISCV/abdu-neg.ll
@@ -624,87 +624,88 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
; RV32I-LABEL: abd_ext_i128:
; RV32I: # %bb.0:
-; RV32I-NEXT: lw a5, 0(a2)
-; RV32I-NEXT: lw a7, 4(a2)
-; RV32I-NEXT: lw a3, 8(a2)
-; RV32I-NEXT: lw t1, 12(a2)
-; RV32I-NEXT: lw a4, 8(a1)
-; RV32I-NEXT: lw a6, 12(a1)
-; RV32I-NEXT: lw a2, 0(a1)
+; RV32I-NEXT: lw a4, 0(a2)
+; RV32I-NEXT: lw a6, 4(a2)
+; RV32I-NEXT: lw t1, 8(a2)
+; RV32I-NEXT: lw a2, 12(a2)
+; RV32I-NEXT: lw a3, 8(a1)
+; RV32I-NEXT: lw a5, 12(a1)
+; RV32I-NEXT: lw a7, 0(a1)
; RV32I-NEXT: lw t0, 4(a1)
-; RV32I-NEXT: sltu a1, a4, a3
-; RV32I-NEXT: sub t1, a6, t1
-; RV32I-NEXT: sltu t2, a2, a5
-; RV32I-NEXT: sub a1, t1, a1
-; RV32I-NEXT: mv t1, t2
-; RV32I-NEXT: beq t0, a7, .LBB11_2
+; RV32I-NEXT: sltu a1, a3, t1
+; RV32I-NEXT: sub a2, a5, a2
+; RV32I-NEXT: sltu t2, a7, a4
+; RV32I-NEXT: sub a1, a2, a1
+; RV32I-NEXT: mv a2, t2
+; RV32I-NEXT: beq t0, a6, .LBB11_2
; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: sltu t1, t0, a7
+; RV32I-NEXT: sltu a2, t0, a6
; RV32I-NEXT: .LBB11_2:
-; RV32I-NEXT: sub a3, a4, a3
-; RV32I-NEXT: sltu t3, a3, t1
+; RV32I-NEXT: sub t1, a3, t1
+; RV32I-NEXT: sltu t3, t1, a2
; RV32I-NEXT: sub a1, a1, t3
-; RV32I-NEXT: sub a3, a3, t1
-; RV32I-NEXT: beq a1, a6, .LBB11_4
+; RV32I-NEXT: sub a2, t1, a2
+; RV32I-NEXT: beq a1, a5, .LBB11_4
; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: sltu t1, a6, a1
+; RV32I-NEXT: sltu t1, a5, a1
; RV32I-NEXT: j .LBB11_5
; RV32I-NEXT: .LBB11_4:
-; RV32I-NEXT: sltu t1, a4, a3
+; RV32I-NEXT: sltu t1, a3, a2
; RV32I-NEXT: .LBB11_5:
-; RV32I-NEXT: sub a7, t0, a7
-; RV32I-NEXT: sub a7, a7, t2
-; RV32I-NEXT: sub a5, a2, a5
-; RV32I-NEXT: beq a7, t0, .LBB11_7
+; RV32I-NEXT: sub a6, t0, a6
+; RV32I-NEXT: sub a6, a6, t2
+; RV32I-NEXT: sub t2, a7, a4
+; RV32I-NEXT: beq a6, t0, .LBB11_7
; RV32I-NEXT: # %bb.6:
-; RV32I-NEXT: sltu a2, t0, a7
+; RV32I-NEXT: sltu a4, t0, a6
; RV32I-NEXT: j .LBB11_8
; RV32I-NEXT: .LBB11_7:
-; RV32I-NEXT: sltu a2, a2, a5
+; RV32I-NEXT: sltu a4, a7, t2
; RV32I-NEXT: .LBB11_8:
-; RV32I-NEXT: xor a6, a1, a6
-; RV32I-NEXT: xor a4, a3, a4
-; RV32I-NEXT: or a4, a4, a6
-; RV32I-NEXT: beqz a4, .LBB11_10
+; RV32I-NEXT: xor a5, a1, a5
+; RV32I-NEXT: xor a3, a2, a3
+; RV32I-NEXT: or a3, a3, a5
+; RV32I-NEXT: beqz a3, .LBB11_10
; RV32I-NEXT: # %bb.9:
-; RV32I-NEXT: mv a2, t1
+; RV32I-NEXT: mv a4, t1
; RV32I-NEXT: .LBB11_10:
-; RV32I-NEXT: neg a4, a2
-; RV32I-NEXT: xor t0, a5, a4
-; RV32I-NEXT: xor t3, a7, a4
-; RV32I-NEXT: sltu a5, t0, a4
-; RV32I-NEXT: add a6, t3, a2
-; RV32I-NEXT: add t0, t0, a2
-; RV32I-NEXT: sub t1, a6, a5
-; RV32I-NEXT: snez a6, t1
-; RV32I-NEXT: snez t2, t0
-; RV32I-NEXT: or a6, t2, a6
-; RV32I-NEXT: beqz a7, .LBB11_12
+; RV32I-NEXT: neg a5, a4
+; RV32I-NEXT: xor t0, t2, a5
+; RV32I-NEXT: sltu a7, t0, a5
+; RV32I-NEXT: xor t1, a6, a5
+; RV32I-NEXT: mv a3, a7
+; RV32I-NEXT: beqz a6, .LBB11_12
; RV32I-NEXT: # %bb.11:
-; RV32I-NEXT: sltu a5, t3, a4
+; RV32I-NEXT: sltu a3, t1, a5
; RV32I-NEXT: .LBB11_12:
-; RV32I-NEXT: xor a3, a3, a4
-; RV32I-NEXT: xor a1, a1, a4
-; RV32I-NEXT: add t1, t1, t2
-; RV32I-NEXT: neg a7, t0
-; RV32I-NEXT: add t0, a3, a2
-; RV32I-NEXT: sltu a3, a3, a4
+; RV32I-NEXT: xor a2, a2, a5
+; RV32I-NEXT: add t1, t1, a4
+; RV32I-NEXT: add t0, t0, a4
+; RV32I-NEXT: xor a1, a1, a5
+; RV32I-NEXT: add a6, a2, a4
+; RV32I-NEXT: sub a7, t1, a7
+; RV32I-NEXT: sltu a2, a2, a5
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: snez a4, t0
+; RV32I-NEXT: neg a5, t0
+; RV32I-NEXT: sub t1, a6, a3
+; RV32I-NEXT: or t0, t0, a7
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: sltu a2, a6, a3
+; RV32I-NEXT: add a4, a7, a4
+; RV32I-NEXT: neg a3, t1
+; RV32I-NEXT: snez a6, t0
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: snez a2, t1
+; RV32I-NEXT: neg a4, a4
+; RV32I-NEXT: sltu a7, a3, a6
; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: neg a2, t1
-; RV32I-NEXT: sub a4, t0, a5
-; RV32I-NEXT: sub a1, a1, a3
-; RV32I-NEXT: sltu a3, t0, a5
-; RV32I-NEXT: neg a5, a4
-; RV32I-NEXT: sub a1, a1, a3
-; RV32I-NEXT: snez a3, a4
-; RV32I-NEXT: sltu a4, a5, a6
-; RV32I-NEXT: add a1, a1, a3
-; RV32I-NEXT: sub a3, a5, a6
+; RV32I-NEXT: sub a2, a3, a6
; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a4
-; RV32I-NEXT: sw a7, 0(a0)
-; RV32I-NEXT: sw a2, 4(a0)
-; RV32I-NEXT: sw a3, 8(a0)
+; RV32I-NEXT: sub a1, a1, a7
+; RV32I-NEXT: sw a5, 0(a0)
+; RV32I-NEXT: sw a4, 4(a0)
+; RV32I-NEXT: sw a2, 8(a0)
; RV32I-NEXT: sw a1, 12(a0)
; RV32I-NEXT: ret
;
@@ -736,87 +737,88 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
;
; RV32ZBB-LABEL: abd_ext_i128:
; RV32ZBB: # %bb.0:
-; RV32ZBB-NEXT: lw a5, 0(a2)
-; RV32ZBB-NEXT: lw a7, 4(a2)
-; RV32ZBB-NEXT: lw a3, 8(a2)
-; RV32ZBB-NEXT: lw t1, 12(a2)
-; RV32ZBB-NEXT: lw a4, 8(a1)
-; RV32ZBB-NEXT: lw a6, 12(a1)
-; RV32ZBB-NEXT: lw a2, 0(a1)
+; RV32ZBB-NEXT: lw a4, 0(a2)
+; RV32ZBB-NEXT: lw a6, 4(a2)
+; RV32ZBB-NEXT: lw t1, 8(a2)
+; RV32ZBB-NEXT: lw a2, 12(a2)
+; RV32ZBB-NEXT: lw a3, 8(a1)
+; RV32ZBB-NEXT: lw a5, 12(a1)
+; RV32ZBB-NEXT: lw a7, 0(a1)
; RV32ZBB-NEXT: lw t0, 4(a1)
-; RV32ZBB-NEXT: sltu a1, a4, a3
-; RV32ZBB-NEXT: sub t1, a6, t1
-; RV32ZBB-NEXT: sltu t2, a2, a5
-; RV32ZBB-NEXT: sub a1, t1, a1
-; RV32ZBB-NEXT: mv t1, t2
-; RV32ZBB-NEXT: beq t0, a7, .LBB11_2
+; RV32ZBB-NEXT: sltu a1, a3, t1
+; RV32ZBB-NEXT: sub a2, a5, a2
+; RV32ZBB-NEXT: sltu t2, a7, a4
+; RV32ZBB-NEXT: sub a1, a2, a1
+; RV32ZBB-NEXT: mv a2, t2
+; RV32ZBB-NEXT: beq t0, a6, .LBB11_2
; RV32ZBB-NEXT: # %bb.1:
-; RV32ZBB-NEXT: sltu t1, t0, a7
+; RV32ZBB-NEXT: sltu a2, t0, a6
; RV32ZBB-NEXT: .LBB11_2:
-; RV32ZBB-NEXT: sub a3, a4, a3
-; RV32ZBB-NEXT: sltu t3, a3, t1
+; RV32ZBB-NEXT: sub t1, a3, t1
+; RV32ZBB-NEXT: sltu t3, t1, a2
; RV32ZBB-NEXT: sub a1, a1, t3
-; RV32ZBB-NEXT: sub a3, a3, t1
-; RV32ZBB-NEXT: beq a1, a6, .LBB11_4
+; RV32ZBB-NEXT: sub a2, t1, a2
+; RV32ZBB-NEXT: beq a1, a5, .LBB11_4
; RV32ZBB-NEXT: # %bb.3:
-; RV32ZBB-NEXT: sltu t1, a6, a1
+; RV32ZBB-NEXT: sltu t1, a5, a1
; RV32ZBB-NEXT: j .LBB11_5
; RV32ZBB-NEXT: .LBB11_4:
-; RV32ZBB-NEXT: sltu t1, a4, a3
+; RV32ZBB-NEXT: sltu t1, a3, a2
; RV32ZBB-NEXT: .LBB11_5:
-; RV32ZBB-NEXT: sub a7, t0, a7
-; RV32ZBB-NEXT: sub a7, a7, t2
-; RV32ZBB-NEXT: sub a5, a2, a5
-; RV32ZBB-NEXT: beq a7, t0, .LBB11_7
+; RV32ZBB-NEXT: sub a6, t0, a6
+; RV32ZBB-NEXT: sub a6, a6, t2
+; RV32ZBB-NEXT: sub t2, a7, a4
+; RV32ZBB-NEXT: beq a6, t0, .LBB11_7
; RV32ZBB-NEXT: # %bb.6:
-; RV32ZBB-NEXT: sltu a2, t0, a7
+; RV32ZBB-NEXT: sltu a4, t0, a6
; RV32ZBB-NEXT: j .LBB11_8
; RV32ZBB-NEXT: .LBB11_7:
-; RV32ZBB-NEXT: sltu a2, a2, a5
+; RV32ZBB-NEXT: sltu a4, a7, t2
; RV32ZBB-NEXT: .LBB11_8:
-; RV32ZBB-NEXT: xor a6, a1, a6
-; RV32ZBB-NEXT: xor a4, a3, a4
-; RV32ZBB-NEXT: or a4, a4, a6
-; RV32ZBB-NEXT: beqz a4, .LBB11_10
+; RV32ZBB-NEXT: xor a5, a1, a5
+; RV32ZBB-NEXT: xor a3, a2, a3
+; RV32ZBB-NEXT: or a3, a3, a5
+; RV32ZBB-NEXT: beqz a3, .LBB11_10
; RV32ZBB-NEXT: # %bb.9:
-; RV32ZBB-NEXT: mv a2, t1
+; RV32ZBB-NEXT: mv a4, t1
; RV32ZBB-NEXT: .LBB11_10:
-; RV32ZBB-NEXT: neg a4, a2
-; RV32ZBB-NEXT: xor t0, a5, a4
-; RV32ZBB-NEXT: xor t3, a7, a4
-; RV32ZBB-NEXT: sltu a5, t0, a4
-; RV32ZBB-NEXT: add a6, t3, a2
-; RV32ZBB-NEXT: add t0, t0, a2
-; RV32ZBB-NEXT: sub t1, a6, a5
-; RV32ZBB-NEXT: snez a6, t1
-; RV32ZBB-NEXT: snez t2, t0
-; RV32ZBB-NEXT: or a6, t2, a6
-; RV32ZBB-NEXT: beqz a7, .LBB11_12
+; RV32ZBB-NEXT: neg a5, a4
+; RV32ZBB-NEXT: xor t0, t2, a5
+; RV32ZBB-NEXT: sltu a7, t0, a5
+; RV32ZBB-NEXT: xor t1, a6, a5
+; RV32ZBB-NEXT: mv a3, a7
+; RV32ZBB-NEXT: beqz a6, .LBB11_12
; RV32ZBB-NEXT: # %bb.11:
-; RV32ZBB-NEXT: sltu a5, t3, a4
+; RV32ZBB-NEXT: sltu a3, t1, a5
; RV32ZBB-NEXT: .LBB11_12:
-; RV32ZBB-NEXT: xor a3, a3, a4
-; RV32ZBB-NEXT: xor a1, a1, a4
-; RV32ZBB-NEXT: add t1, t1, t2
-; RV32ZBB-NEXT: neg a7, t0
-; RV32ZBB-NEXT: add t0, a3, a2
-; RV32ZBB-NEXT: sltu a3, a3, a4
+; RV32ZBB-NEXT: xor a2, a2, a5
+; RV32ZBB-NEXT: add t1, t1, a4
+; RV32ZBB-NEXT: add t0, t0, a4
+; RV32ZBB-NEXT: xor a1, a1, a5
+; RV32ZBB-NEXT: add a6, a2, a4
+; RV32ZBB-NEXT: sub a7, t1, a7
+; RV32ZBB-NEXT: sltu a2, a2, a5
+; RV32ZBB-NEXT: add a1, a1, a4
+; RV32ZBB-NEXT: snez a4, t0
+; RV32ZBB-NEXT: neg a5, t0
+; RV32ZBB-NEXT: sub t1, a6, a3
+; RV32ZBB-NEXT: or t0, t0, a7
+; RV32ZBB-NEXT: sub a1, a1, a2
+; RV32ZBB-NEXT: sltu a2, a6, a3
+; RV32ZBB-NEXT: add a4, a7, a4
+; RV32ZBB-NEXT: neg a3, t1
+; RV32ZBB-NEXT: snez a6, t0
+; RV32ZBB-NEXT: sub a1, a1, a2
+; RV32ZBB-NEXT: snez a2, t1
+; RV32ZBB-NEXT: neg a4, a4
+; RV32ZBB-NEXT: sltu a7, a3, a6
; RV32ZBB-NEXT: add a1, a1, a2
-; RV32ZBB-NEXT: neg a2, t1
-; RV32ZBB-NEXT: sub a4, t0, a5
-; RV32ZBB-NEXT: sub a1, a1, a3
-; RV32ZBB-NEXT: sltu a3, t0, a5
-; RV32ZBB-NEXT: neg a5, a4
-; RV32ZBB-NEXT: sub a1, a1, a3
-; RV32ZBB-NEXT: snez a3, a4
-; RV32ZBB-NEXT: sltu a4, a5, a6
-; RV32ZBB-NEXT: add a1, a1, a3
-; RV32ZBB-NEXT: sub a3, a5, a6
+; RV32ZBB-NEXT: sub a2, a3, a6
; RV32ZBB-NEXT: neg a1, a1
-; RV32ZBB-NEXT: sub a1, a1, a4
-; RV32ZBB-NEXT: sw a7, 0(a0)
-; RV32ZBB-NEXT: sw a2, 4(a0)
-; RV32ZBB-NEXT: sw a3, 8(a0)
+; RV32ZBB-NEXT: sub a1, a1, a7
+; RV32ZBB-NEXT: sw a5, 0(a0)
+; RV32ZBB-NEXT: sw a4, 4(a0)
+; RV32ZBB-NEXT: sw a2, 8(a0)
; RV32ZBB-NEXT: sw a1, 12(a0)
; RV32ZBB-NEXT: ret
;
@@ -857,87 +859,88 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
; RV32I-LABEL: abd_ext_i128_undef:
; RV32I: # %bb.0:
-; RV32I-NEXT: lw a5, 0(a2)
-; RV32I-NEXT: lw a7, 4(a2)
-; RV32I-NEXT: lw a3, 8(a2)
-; RV32I-NEXT: lw t1, 12(a2)
-; RV32I-NEXT: lw a4, 8(a1)
-; RV32I-NEXT: lw a6, 12(a1)
-; RV32I-NEXT: lw a2, 0(a1)
+; RV32I-NEXT: lw a4, 0(a2)
+; RV32I-NEXT: lw a6, 4(a2)
+; RV32I-NEXT: lw t1, 8(a2)
+; RV32I-NEXT: lw a2, 12(a2)
+; RV32I-NEXT: lw a3, 8(a1)
+; RV32I-NEXT: lw a5, 12(a1)
+; RV32I-NEXT: lw a7, 0(a1)
; RV32I-NEXT: lw t0, 4(a1)
-; RV32I-NEXT: sltu a1, a4, a3
-; RV32I-NEXT: sub t1, a6, t1
-; RV32I-NEXT: sltu t2, a2, a5
-; RV32I-NEXT: sub a1, t1, a1
-; RV32I-NEXT: mv t1, t2
-; RV32I-NEXT: beq t0, a7, .LBB12_2
+; RV32I-NEXT: sltu a1, a3, t1
+; RV32I-NEXT: sub a2, a5, a2
+; RV32I-NEXT: sltu t2, a7, a4
+; RV32I-NEXT: sub a1, a2, a1
+; RV32I-NEXT: mv a2, t2
+; RV32I-NEXT: beq t0, a6, .LBB12_2
; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: sltu t1, t0, a7
+; RV32I-NEXT: sltu a2, t0, a6
; RV32I-NEXT: .LBB12_2:
-; RV32I-NEXT: sub a3, a4, a3
-; RV32I-NEXT: sltu t3, a3, t1
+; RV32I-NEXT: sub t1, a3, t1
+; RV32I-NEXT: sltu t3, t1, a2
; RV32I-NEXT: sub a1, a1, t3
-; RV32I-NEXT: sub a3, a3, t1
-; RV32I-NEXT: beq a1, a6, .LBB12_4
+; RV32I-NEXT: sub a2, t1, a2
+; RV32I-NEXT: beq a1, a5, .LBB12_4
; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: sltu t1, a6, a1
+; RV32I-NEXT: sltu t1, a5, a1
; RV32I-NEXT: j .LBB12_5
; RV32I-NEXT: .LBB12_4:
-; RV32I-NEXT: sltu t1, a4, a3
+; RV32I-NEXT: sltu t1, a3, a2
; RV32I-NEXT: .LBB12_5:
-; RV32I-NEXT: sub a7, t0, a7
-; RV32I-NEXT: sub a7, a7, t2
-; RV32I-NEXT: sub a5, a2, a5
-; RV32I-NEXT: beq a7, t0, .LBB12_7
+; RV32I-NEXT: sub a6, t0, a6
+; RV32I-NEXT: sub a6, a6, t2
+; RV32I-NEXT: sub t2, a7, a4
+; RV32I-NEXT: beq a6, t0, .LBB12_7
; RV32I-NEXT: # %bb.6:
-; RV32I-NEXT: sltu a2, t0, a7
+; RV32I-NEXT: sltu a4, t0, a6
; RV32I-NEXT: j .LBB12_8
; RV32I-NEXT: .LBB12_7:
-; RV32I-NEXT: sltu a2, a2, a5
+; RV32I-NEXT: sltu a4, a7, t2
; RV32I-NEXT: .LBB12_8:
-; RV32I-NEXT: xor a6, a1, a6
-; RV32I-NEXT: xor a4, a3, a4
-; RV32I-NEXT: or a4, a4, a6
-; RV32I-NEXT: beqz a4, .LBB12_10
+; RV32I-NEXT: xor a5, a1, a5
+; RV32I-NEXT: xor a3, a2, a3
+; RV32I-NEXT: or a3, a3, a5
+; RV32I-NEXT: beqz a3, .LBB12_10
; RV32I-NEXT: # %bb.9:
-; RV32I-NEXT: mv a2, t1
+; RV32I-NEXT: mv a4, t1
; RV32I-NEXT: .LBB12_10:
-; RV32I-NEXT: neg a4, a2
-; RV32I-NEXT: xor t0, a5, a4
-; RV32I-NEXT: xor t3, a7, a4
-; RV32I-NEXT: sltu a5, t0, a4
-; RV32I-NEXT: add a6, t3, a2
-; RV32I-NEXT: add t0, t0, a2
-; RV32I-NEXT: sub t1, a6, a5
-; RV32I-NEXT: snez a6, t1
-; RV32I-NEXT: snez t2, t0
-; RV32I-NEXT: or a6, t2, a6
-; RV32I-NEXT: beqz a7, .LBB12_12
+; RV32I-NEXT: neg a5, a4
+; RV32I-NEXT: xor t0, t2, a5
+; RV32I-NEXT: sltu a7, t0, a5
+; RV32I-NEXT: xor t1, a6, a5
+; RV32I-NEXT: mv a3, a7
+; RV32I-NEXT: beqz a6, .LBB12_12
; RV32I-NEXT: # %bb.11:
-; RV32I-NEXT: sltu a5, t3, a4
+; RV32I-NEXT: sltu a3, t1, a5
; RV32I-NEXT: .LBB12_12:
-; RV32I-NEXT: xor a3, a3, a4
-; RV32I-NEXT: xor a1, a1, a4
-; RV32I-NEXT: add t1, t1, t2
-; RV32I-NEXT: neg a7, t0
-; RV32I-NEXT: add t0, a3, a2
-; RV32I-NEXT: sltu a3, a3, a4
+; RV32I-NEXT: xor a2, a2, a5
+; RV32I-NEXT: add t1, t1, a4
+; RV32I-NEXT: add t0, t0, a4
+; RV32I-NEXT: xor a1, a1, a5
+; RV32I-NEXT: add a6, a2, a4
+; RV32I-NEXT: sub a7, t1, a7
+; RV32I-NEXT: sltu a2, a2, a5
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: snez a4, t0
+; RV32I-NEXT: neg a5, t0
+; RV32I-NEXT: sub t1, a6, a3
+; RV32I-NEXT: or t0, t0, a7
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: sltu a2, a6, a3
+; RV32I-NEXT: add a4, a7, a4
+; RV32I-NEXT: neg a3, t1
+; RV32I-NEXT: snez a6, t0
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: snez a2, t1
+; RV32I-NEXT: neg a4, a4
+; RV32I-NEXT: sltu a7, a3, a6
; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: neg a2, t1
-; RV32I-NEXT: sub a4, t0, a5
-; RV32I-NEXT: sub a1, a1, a3
-; RV32I-NEXT: sltu a3, t0, a5
-; RV32I-NEXT: neg a5, a4
-; RV32I-NEXT: sub a1, a1, a3
-; RV32I-NEXT: snez a3, a4
-; RV32I-NEXT: sltu a4, a5, a6
-; RV32I-NEXT: add a1, a1, a3
-; RV32I-NEXT: sub a3, a5, a6
+; RV32I-NEXT: sub a2, a3, a6
; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: sub a1, a1, a4
-; RV32I-NEXT: sw a7, 0(a0)
-; RV32I-NEXT: sw a2, 4(a0)
-; RV32I-NEXT: sw a3, 8(a0)
+; RV32I-NEXT: sub a1, a1, a7
+; RV32I-NEXT: sw a5, 0(a0)
+; RV32I-NEXT: sw a4, 4(a0)
+; RV32I-NEXT: sw a2, 8(a0)
; RV32I-NEXT: sw a1, 12(a0)
; RV32I-NEXT: ret
;
@@ -969,87 +972,88 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
;
; RV32ZBB-LABEL: abd_ext_i128_undef:
; RV32ZBB: # %bb.0:
-; RV32ZBB-NEXT: lw a5, 0(a2)
-; RV32ZBB-NEXT: lw a7, 4(a2)
-; RV32ZBB-NEXT: lw a3, 8(a2)
-; RV32ZBB-NEXT: lw t1, 12(a2)
-; RV32ZBB-NEXT: lw a4, 8(a1)
-; RV32ZBB-NEXT: lw a6, 12(a1)
-; RV32ZBB-NEXT: lw a2, 0(a1)
+; RV32ZBB-NEXT: lw a4, 0(a2)
+; RV32ZBB-NEXT: lw a6, 4(a2)
+; RV32ZBB-NEXT: lw t1, 8(a2)
+; RV32ZBB-NEXT: lw a2, 12(a2)
+; RV32ZBB-NEXT: lw a3, 8(a1)
+; RV32ZBB-NEXT: lw a5, 12(a1)
+; RV32ZBB-NEXT: lw a7, 0(a1)
; RV32ZBB-NEXT: lw t0, 4(a1)
-; RV32ZBB-NEXT: sltu a1, a4, a3
-; RV32ZBB-NEXT: sub t1, a6, t1
-; RV32ZBB-NEXT: sltu t2, a2, a5
-; RV32ZBB-NEXT: sub a1, t1, a1
-; RV32ZBB-NEXT: mv t1, t2
-; RV32ZBB-NEXT: beq t0, a7, .LBB12_2
+; RV32ZBB-NEXT: sltu a1, a3, t1
+; RV32ZBB-NEXT: sub a2, a5, a2
+; RV32ZBB-NEXT: sltu t2, a7, a4
+; RV32ZBB-NEXT: sub a1, a2, a1
+; RV32ZBB-NEXT: mv a2, t2
+; RV32ZBB-NEXT: beq t0, a6, .LBB12_2
; RV32ZBB-NEXT: # %bb.1:
-; RV32ZBB-NEXT: sltu t1, t0, a7
+; RV32ZBB-NEXT: sltu a2, t0, a6
; RV32ZBB-NEXT: .LBB12_2:
-; RV32ZBB-NEXT: sub a3, a4, a3
-; RV32ZBB-NEXT: sltu t3, a3, t1
+; RV32ZBB-NEXT: sub t1, a3, t1
+; RV32ZBB-NEXT: sltu t3, t1, a2
; RV32ZBB-NEXT: sub a1, a1, t3
-; RV32ZBB-NEXT: sub a3, a3, t1
-; RV32ZBB-NEXT: beq a1, a6, .LBB12_4
+; RV32ZBB-NEXT: sub a2, t1, a2
+; RV32ZBB-NEXT: beq a1, a5, .LBB12_4
; RV32ZBB-NEXT: # %bb.3:
-; RV32ZBB-NEXT: sltu t1, a6, a1
+; RV32ZBB-NEXT: sltu t1, a5, a1
; RV32ZBB-NEXT: j .LBB12_5
; RV32ZBB-NEXT: .LBB12_4:
-; RV32ZBB-NEXT: sltu t1, a4, a3
+; RV32ZBB-NEXT: sltu t1, a3, a2
; RV32ZBB-NEXT: .LBB12_5:
-; RV32ZBB-NEXT: sub a7, t0, a7
-; RV32ZBB-NEXT: sub a7, a7, t2
-; RV32ZBB-NEXT: sub a5, a2, a5
-; RV32ZBB-NEXT: beq a7, t0, .LBB12_7
+; RV32ZBB-NEXT: sub a6, t0, a6
+; RV32ZBB-NEXT: sub a6, a6, t2
+; RV32ZBB-NEXT: sub t2, a7, a4
+; RV32ZBB-NEXT: beq a6, t0, .LBB12_7
; RV32ZBB-NEXT: # %bb.6:
-; RV32ZBB-NEXT: sltu a2, t0, a7
+; RV32ZBB-NEXT: sltu a4, t0, a6
; RV32ZBB-NEXT: j .LBB12_8
; RV32ZBB-NEXT: .LBB12_7:
-; RV32ZBB-NEXT: sltu a2, a2, a5
+; RV32ZBB-NEXT: sltu a4, a7, t2
; RV32ZBB-NEXT: .LBB12_8:
-; RV32ZBB-NEXT: xor a6, a1, a6
-; RV32ZBB-NEXT: xor a4, a3, a4
-; RV32ZBB-NEXT: or a4, a4, a6
-; RV32ZBB-NEXT: beqz a4, .LBB12_10
+; RV32ZBB-NEXT: xor a5, a1, a5
+; RV32ZBB-NEXT: xor a3, a2, a3
+; RV32ZBB-NEXT: or a3, a3, a5
+; RV32ZBB-NEXT: beqz a3, .LBB12_10
; RV32ZBB-NEXT: # %bb.9:
-; RV32ZBB-NEXT: mv a2, t1
+; RV32ZBB-NEXT: mv a4, t1
; RV32ZBB-NEXT: .LBB12_10:
-; RV32ZBB-NEXT: neg a4, a2
-; RV32ZBB-NEXT: xor t0, a5, a4
-; RV32ZBB-NEXT: xor t3, a7, a4
-; RV32ZBB-NEXT: sltu a5, t0, a4
-; RV32ZBB-NEXT: add a6, t3, a2
-; RV32ZBB-NEXT: add t0, t0, a2
-; RV32ZBB-NEXT: sub t1, a6, a5
-; RV32ZBB-NEXT: snez a6, t1
-; RV32ZBB-NEXT: snez t2, t0
-; RV32ZBB-NEXT: or a6, t2, a6
-; RV32ZBB-NEXT: beqz a7, .LBB12_12
+; RV32ZBB-NEXT: neg a5, a4
+; RV32ZBB-NEXT: xor t0, t2, a5
+; RV32ZBB-NEXT: sltu a7, t0, a5
+; RV32ZBB-NEXT: xor t1, a6, a5
+; RV32ZBB-NEXT: mv a3, a7
+; RV32ZBB-NEXT: beqz a6, .LBB12_12
; RV32ZBB-NEXT: # %bb.11:
-; RV32ZBB-NEXT: sltu a5, t3, a4
+; RV32ZBB-NEXT: sltu a3, t1, a5
; RV32ZBB-NEXT: .LBB12_12:
-; RV32ZBB-NEXT: xor a3, a3, a4
-; RV32ZBB-NEXT: xor a1, a1, a4
-; RV32ZBB-NEXT: add t1, t1, t2
-; RV32ZBB-NEXT: neg a7, t0
-; RV32ZBB-NEXT: add t0, a3, a2
-; RV32ZBB-NEXT: sltu a3, a3, a4
+; RV32ZBB-NEXT: xor a2, a2, a5
+; RV32ZBB-NEXT: add t1, t1, a4
+; RV32ZBB-NEXT: add t0, t0, a4
+; RV32ZBB-NEXT: xor a1, a1, a5
+; RV32ZBB-NEXT: add a6, a2, a4
+; RV32ZBB-NEXT: sub a7, t1, a7
+; RV32ZBB-NEXT: sltu a2, a2, a5
+; RV32ZBB-NEXT: add a1, a1, a4
+; RV32ZBB-NEXT: snez a4, t0
+; RV32ZBB-NEXT: neg a5, t0
+; RV32ZBB-NEXT: sub t1, a6, a3
+; RV32ZBB-NEXT: or t0, t0, a7
+; RV32ZBB-NEXT: sub a1, a1, a2
+; RV32ZBB-NEXT: sltu a2, a6, a3
+; RV32ZBB-NEXT: add a4, a7, a4
+; RV32ZBB-NEXT: neg a3, t1
+; RV32ZBB-NEXT: snez a6, t0
+; RV32ZBB-NEXT: sub a1, a1, a2
+; RV32ZBB-NEXT: snez a2, t1
+; RV32ZBB-NEXT: neg a4, a4
+; RV32ZBB-NEXT: sltu a7, a3, a6
; RV32ZBB-NEXT: add a1, a1, a2
-; RV32ZBB-NEXT: neg a2, t1
-; RV32ZBB-NEXT: sub a4, t0, a5
-; RV32ZBB-NEXT: sub a1, a1, a3
-; RV32ZBB-NEXT: sltu a3, t0, a5
-; RV32ZBB-NEXT: neg a5, a4
-; RV32ZBB-NEXT: sub a1, a1, a3
-; RV32ZBB-NEXT: snez a3, a4
-; RV32ZBB-NEXT: sltu a4, a5, a6
-; RV32ZBB-NEXT: add a1, a1, a3
-; RV32ZBB-NEXT: sub a3, a5, a6
+; RV32ZBB-NEXT: sub a2, a3, a6
; RV32ZBB-NEXT: neg a1, a1
-; RV32ZBB-NEXT: sub a1, a1, a4
-; RV32ZBB-NEXT: sw a7, 0(a0)
-; RV32ZBB-NEXT: sw a2, 4(a0)
-; RV32ZBB-NEXT: sw a3, 8(a0)
+; RV32ZBB-NEXT: sub a1, a1, a7
+; RV32ZBB-NEXT: sw a5, 0(a0)
+; RV32ZBB-NEXT: sw a4, 4(a0)
+; RV32ZBB-NEXT: sw a2, 8(a0)
; RV32ZBB-NEXT: sw a1, 12(a0)
; RV32ZBB-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
index 18d071cc39bb6..cf6658a5d5eb4 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -1091,24 +1091,23 @@ define i64 @stest_f64i64(double %x) {
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
; RV32IF-NEXT: call __fixdfti
-; RV32IF-NEXT: lw a3, 8(sp)
; RV32IF-NEXT: lw a1, 12(sp)
-; RV32IF-NEXT: lw a2, 16(sp)
-; RV32IF-NEXT: lw a4, 20(sp)
-; RV32IF-NEXT: lui a0, 524288
-; RV32IF-NEXT: addi a5, a0, -1
+; RV32IF-NEXT: lw a0, 16(sp)
+; RV32IF-NEXT: lw a3, 20(sp)
+; RV32IF-NEXT: lw a4, 8(sp)
+; RV32IF-NEXT: lui a2, 524288
+; RV32IF-NEXT: addi a5, a2, -1
+; RV32IF-NEXT: or a7, a0, a3
; RV32IF-NEXT: beq a1, a5, .LBB18_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: sltu a6, a1, a5
-; RV32IF-NEXT: or a7, a2, a4
; RV32IF-NEXT: bnez a7, .LBB18_3
; RV32IF-NEXT: j .LBB18_4
; RV32IF-NEXT: .LBB18_2:
-; RV32IF-NEXT: sltiu a6, a3, -1
-; RV32IF-NEXT: or a7, a2, a4
+; RV32IF-NEXT: sltiu a6, a4, -1
; RV32IF-NEXT: beqz a7, .LBB18_4
; RV32IF-NEXT: .LBB18_3: # %entry
-; RV32IF-NEXT: srli a6, a4, 31
+; RV32IF-NEXT: srli a6, a3, 31
; RV32IF-NEXT: .LBB18_4: # %entry
; RV32IF-NEXT: neg a7, a6
; RV32IF-NEXT: addi t0, a6, -1
@@ -1116,29 +1115,29 @@ define i64 @stest_f64i64(double %x) {
; RV32IF-NEXT: # %bb.5: # %entry
; RV32IF-NEXT: mv a1, a5
; RV32IF-NEXT: .LBB18_6: # %entry
-; RV32IF-NEXT: or a3, t0, a3
-; RV32IF-NEXT: and a4, a7, a4
-; RV32IF-NEXT: and a2, a7, a2
-; RV32IF-NEXT: beq a1, a0, .LBB18_8
+; RV32IF-NEXT: or a4, t0, a4
+; RV32IF-NEXT: and a3, a7, a3
+; RV32IF-NEXT: and a5, a7, a0
+; RV32IF-NEXT: beq a1, a2, .LBB18_8
; RV32IF-NEXT: # %bb.7: # %entry
-; RV32IF-NEXT: sltu a0, a0, a1
+; RV32IF-NEXT: sltu a0, a2, a1
; RV32IF-NEXT: j .LBB18_9
; RV32IF-NEXT: .LBB18_8:
-; RV32IF-NEXT: snez a0, a3
+; RV32IF-NEXT: snez a0, a4
; RV32IF-NEXT: .LBB18_9: # %entry
-; RV32IF-NEXT: and a2, a2, a4
-; RV32IF-NEXT: li a5, -1
-; RV32IF-NEXT: beq a2, a5, .LBB18_11
+; RV32IF-NEXT: and a5, a5, a3
+; RV32IF-NEXT: li a2, -1
+; RV32IF-NEXT: beq a5, a2, .LBB18_11
; RV32IF-NEXT: # %bb.10: # %entry
-; RV32IF-NEXT: srli a4, a4, 31
-; RV32IF-NEXT: xori a0, a4, 1
+; RV32IF-NEXT: srli a3, a3, 31
+; RV32IF-NEXT: xori a0, a3, 1
; RV32IF-NEXT: .LBB18_11: # %entry
; RV32IF-NEXT: bnez a0, .LBB18_13
; RV32IF-NEXT: # %bb.12: # %entry
; RV32IF-NEXT: lui a1, 524288
; RV32IF-NEXT: .LBB18_13: # %entry
; RV32IF-NEXT: neg a0, a0
-; RV32IF-NEXT: and a0, a0, a3
+; RV32IF-NEXT: and a0, a0, a4
; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 32
@@ -1194,24 +1193,23 @@ define i64 @stest_f64i64(double %x) {
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
; RV32IFD-NEXT: call __fixdfti
-; RV32IFD-NEXT: lw a3, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
-; RV32IFD-NEXT: lw a2, 16(sp)
-; RV32IFD-NEXT: lw a4, 20(sp)
-; RV32IFD-NEXT: lui a0, 524288
-; RV32IFD-NEXT: addi a5, a0, -1
+; RV32IFD-NEXT: lw a0, 16(sp)
+; RV32IFD-NEXT: lw a3, 20(sp)
+; RV32IFD-NEXT: lw a4, 8(sp)
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: addi a5, a2, -1
+; RV32IFD-NEXT: or a7, a0, a3
; RV32IFD-NEXT: beq a1, a5, .LBB18_2
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: sltu a6, a1, a5
-; RV32IFD-NEXT: or a7, a2, a4
; RV32IFD-NEXT: bnez a7, .LBB18_3
; RV32IFD-NEXT: j .LBB18_4
; RV32IFD-NEXT: .LBB18_2:
-; RV32IFD-NEXT: sltiu a6, a3, -1
-; RV32IFD-NEXT: or a7, a2, a4
+; RV32IFD-NEXT: sltiu a6, a4, -1
; RV32IFD-NEXT: beqz a7, .LBB18_4
; RV32IFD-NEXT: .LBB18_3: # %entry
-; RV32IFD-NEXT: srli a6, a4, 31
+; RV32IFD-NEXT: srli a6, a3, 31
; RV32IFD-NEXT: .LBB18_4: # %entry
; RV32IFD-NEXT: neg a7, a6
; RV32IFD-NEXT: addi t0, a6, -1
@@ -1219,29 +1217,29 @@ define i64 @stest_f64i64(double %x) {
; RV32IFD-NEXT: # %bb.5: # %entry
; RV32IFD-NEXT: mv a1, a5
; RV32IFD-NEXT: .LBB18_6: # %entry
-; RV32IFD-NEXT: or a3, t0, a3
-; RV32IFD-NEXT: and a4, a7, a4
-; RV32IFD-NEXT: and a2, a7, a2
-; RV32IFD-NEXT: beq a1, a0, .LBB18_8
+; RV32IFD-NEXT: or a4, t0, a4
+; RV32IFD-NEXT: and a3, a7, a3
+; RV32IFD-NEXT: and a5, a7, a0
+; RV32IFD-NEXT: beq a1, a2, .LBB18_8
; RV32IFD-NEXT: # %bb.7: # %entry
-; RV32IFD-NEXT: sltu a0, a0, a1
+; RV32IFD-NEXT: sltu a0, a2, a1
; RV32IFD-NEXT: j .LBB18_9
; RV32IFD-NEXT: .LBB18_8:
-; RV32IFD-NEXT: snez a0, a3
+; RV32IFD-NEXT: snez a0, a4
; RV32IFD-NEXT: .LBB18_9: # %entry
-; RV32IFD-NEXT: and a2, a2, a4
-; RV32IFD-NEXT: li a5, -1
-; RV32IFD-NEXT: beq a2, a5, .LBB18_11
+; RV32IFD-NEXT: and a5, a5, a3
+; RV32IFD-NEXT: li a2, -1
+; RV32IFD-NEXT: beq a5, a2, .LBB18_11
; RV32IFD-NEXT: # %bb.10: # %entry
-; RV32IFD-NEXT: srli a4, a4, 31
-; RV32IFD-NEXT: xori a0, a4, 1
+; RV32IFD-NEXT: srli a3, a3, 31
+; RV32IFD-NEXT: xori a0, a3, 1
; RV32IFD-NEXT: .LBB18_11: # %entry
; RV32IFD-NEXT: bnez a0, .LBB18_13
; RV32IFD-NEXT: # %bb.12: # %entry
; RV32IFD-NEXT: lui a1, 524288
; RV32IFD-NEXT: .LBB18_13: # %entry
; RV32IFD-NEXT: neg a0, a0
-; RV32IFD-NEXT: and a0, a0, a3
+; RV32IFD-NEXT: and a0, a0, a4
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: addi sp, sp, 32
@@ -1378,8 +1376,8 @@ define i64 @ustest_f64i64(double %x) {
; RV32IF-NEXT: # %bb.4: # %entry
; RV32IF-NEXT: li a0, 1
; RV32IF-NEXT: .LBB20_5: # %entry
-; RV32IF-NEXT: lw a4, 8(sp)
-; RV32IF-NEXT: lw a3, 12(sp)
+; RV32IF-NEXT: lw a3, 8(sp)
+; RV32IF-NEXT: lw a4, 12(sp)
; RV32IF-NEXT: and a5, a2, a1
; RV32IF-NEXT: beqz a5, .LBB20_7
; RV32IF-NEXT: # %bb.6: # %entry
@@ -1393,12 +1391,12 @@ define i64 @ustest_f64i64(double %x) {
; RV32IF-NEXT: and a2, a2, a3
; RV32IF-NEXT: bnez a0, .LBB20_10
; RV32IF-NEXT: # %bb.9:
-; RV32IF-NEXT: or a0, a4, a2
+; RV32IF-NEXT: or a0, a2, a4
; RV32IF-NEXT: snez a1, a0
; RV32IF-NEXT: .LBB20_10: # %entry
; RV32IF-NEXT: neg a1, a1
-; RV32IF-NEXT: and a0, a1, a4
-; RV32IF-NEXT: and a1, a1, a2
+; RV32IF-NEXT: and a0, a1, a2
+; RV32IF-NEXT: and a1, a1, a4
; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 32
@@ -1461,8 +1459,8 @@ define i64 @ustest_f64i64(double %x) {
; RV32IFD-NEXT: # %bb.4: # %entry
; RV32IFD-NEXT: li a0, 1
; RV32IFD-NEXT: .LBB20_5: # %entry
-; RV32IFD-NEXT: lw a4, 8(sp)
-; RV32IFD-NEXT: lw a3, 12(sp)
+; RV32IFD-NEXT: lw a3, 8(sp)
+; RV32IFD-NEXT: lw a4, 12(sp)
; RV32IFD-NEXT: and a5, a2, a1
; RV32IFD-NEXT: beqz a5, .LBB20_7
; RV32IFD-NEXT: # %bb.6: # %entry
@@ -1476,12 +1474,12 @@ define i64 @ustest_f64i64(double %x) {
; RV32IFD-NEXT: and a2, a2, a3
; RV32IFD-NEXT: bnez a0, .LBB20_10
; RV32IFD-NEXT: # %bb.9:
-; RV32IFD-NEXT: or a0, a4, a2
+; RV32IFD-NEXT: or a0, a2, a4
; RV32IFD-NEXT: snez a1, a0
; RV32IFD-NEXT: .LBB20_10: # %entry
; RV32IFD-NEXT: neg a1, a1
-; RV32IFD-NEXT: and a0, a1, a4
-; RV32IFD-NEXT: and a1, a1, a2
+; RV32IFD-NEXT: and a0, a1, a2
+; RV32IFD-NEXT: and a1, a1, a4
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: addi sp, sp, 32
@@ -1506,24 +1504,23 @@ define i64 @stest_f32i64(float %x) {
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
-; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: lw a1, 12(sp)
-; RV32-NEXT: lw a2, 16(sp)
-; RV32-NEXT: lw a4, 20(sp)
-; RV32-NEXT: lui a0, 524288
-; RV32-NEXT: addi a5, a0, -1
+; RV32-NEXT: lw a0, 16(sp)
+; RV32-NEXT: lw a3, 20(sp)
+; RV32-NEXT: lw a4, 8(sp)
+; RV32-NEXT: lui a2, 524288
+; RV32-NEXT: addi a5, a2, -1
+; RV32-NEXT: or a7, a0, a3
; RV32-NEXT: beq a1, a5, .LBB21_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a6, a1, a5
-; RV32-NEXT: or a7, a2, a4
; RV32-NEXT: bnez a7, .LBB21_3
; RV32-NEXT: j .LBB21_4
; RV32-NEXT: .LBB21_2:
-; RV32-NEXT: sltiu a6, a3, -1
-; RV32-NEXT: or a7, a2, a4
+; RV32-NEXT: sltiu a6, a4, -1
; RV32-NEXT: beqz a7, .LBB21_4
; RV32-NEXT: .LBB21_3: # %entry
-; RV32-NEXT: srli a6, a4, 31
+; RV32-NEXT: srli a6, a3, 31
; RV32-NEXT: .LBB21_4: # %entry
; RV32-NEXT: neg a7, a6
; RV32-NEXT: addi t0, a6, -1
@@ -1531,29 +1528,29 @@ define i64 @stest_f32i64(float %x) {
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: mv a1, a5
; RV32-NEXT: .LBB21_6: # %entry
-; RV32-NEXT: or a3, t0, a3
-; RV32-NEXT: and a4, a7, a4
-; RV32-NEXT: and a2, a7, a2
-; RV32-NEXT: beq a1, a0, .LBB21_8
+; RV32-NEXT: or a4, t0, a4
+; RV32-NEXT: and a3, a7, a3
+; RV32-NEXT: and a5, a7, a0
+; RV32-NEXT: beq a1, a2, .LBB21_8
; RV32-NEXT: # %bb.7: # %entry
-; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: sltu a0, a2, a1
; RV32-NEXT: j .LBB21_9
; RV32-NEXT: .LBB21_8:
-; RV32-NEXT: snez a0, a3
+; RV32-NEXT: snez a0, a4
; RV32-NEXT: .LBB21_9: # %entry
-; RV32-NEXT: and a2, a2, a4
-; RV32-NEXT: li a5, -1
-; RV32-NEXT: beq a2, a5, .LBB21_11
+; RV32-NEXT: and a5, a5, a3
+; RV32-NEXT: li a2, -1
+; RV32-NEXT: beq a5, a2, .LBB21_11
; RV32-NEXT: # %bb.10: # %entry
-; RV32-NEXT: srli a4, a4, 31
-; RV32-NEXT: xori a0, a4, 1
+; RV32-NEXT: srli a3, a3, 31
+; RV32-NEXT: xori a0, a3, 1
; RV32-NEXT: .LBB21_11: # %entry
; RV32-NEXT: bnez a0, .LBB21_13
; RV32-NEXT: # %bb.12: # %entry
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: .LBB21_13: # %entry
; RV32-NEXT: neg a0, a0
-; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a0, a0, a4
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
@@ -1658,8 +1655,8 @@ define i64 @ustest_f32i64(float %x) {
; RV32-NEXT: # %bb.4: # %entry
; RV32-NEXT: li a0, 1
; RV32-NEXT: .LBB23_5: # %entry
-; RV32-NEXT: lw a4, 8(sp)
-; RV32-NEXT: lw a3, 12(sp)
+; RV32-NEXT: lw a3, 8(sp)
+; RV32-NEXT: lw a4, 12(sp)
; RV32-NEXT: and a5, a2, a1
; RV32-NEXT: beqz a5, .LBB23_7
; RV32-NEXT: # %bb.6: # %entry
@@ -1673,12 +1670,12 @@ define i64 @ustest_f32i64(float %x) {
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: bnez a0, .LBB23_10
; RV32-NEXT: # %bb.9:
-; RV32-NEXT: or a0, a4, a2
+; RV32-NEXT: or a0, a2, a4
; RV32-NEXT: snez a1, a0
; RV32-NEXT: .LBB23_10: # %entry
; RV32-NEXT: neg a1, a1
-; RV32-NEXT: and a0, a1, a4
-; RV32-NEXT: and a1, a1, a2
+; RV32-NEXT: and a0, a1, a2
+; RV32-NEXT: and a1, a1, a4
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
@@ -1733,24 +1730,23 @@ define i64 @stest_f16i64(half %x) {
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
-; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: lw a1, 12(sp)
-; RV32-NEXT: lw a2, 16(sp)
-; RV32-NEXT: lw a4, 20(sp)
-; RV32-NEXT: lui a0, 524288
-; RV32-NEXT: addi a5, a0, -1
+; RV32-NEXT: lw a0, 16(sp)
+; RV32-NEXT: lw a3, 20(sp)
+; RV32-NEXT: lw a4, 8(sp)
+; RV32-NEXT: lui a2, 524288
+; RV32-NEXT: addi a5, a2, -1
+; RV32-NEXT: or a7, a0, a3
; RV32-NEXT: beq a1, a5, .LBB24_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a6, a1, a5
-; RV32-NEXT: or a7, a2, a4
; RV32-NEXT: bnez a7, .LBB24_3
; RV32-NEXT: j .LBB24_4
; RV32-NEXT: .LBB24_2:
-; RV32-NEXT: sltiu a6, a3, -1
-; RV32-NEXT: or a7, a2, a4
+; RV32-NEXT: sltiu a6, a4, -1
; RV32-NEXT: beqz a7, .LBB24_4
; RV32-NEXT: .LBB24_3: # %entry
-; RV32-NEXT: srli a6, a4, 31
+; RV32-NEXT: srli a6, a3, 31
; RV32-NEXT: .LBB24_4: # %entry
; RV32-NEXT: neg a7, a6
; RV32-NEXT: addi t0, a6, -1
@@ -1758,29 +1754,29 @@ define i64 @stest_f16i64(half %x) {
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: mv a1, a5
; RV32-NEXT: .LBB24_6: # %entry
-; RV32-NEXT: or a3, t0, a3
-; RV32-NEXT: and a4, a7, a4
-; RV32-NEXT: and a2, a7, a2
-; RV32-NEXT: beq a1, a0, .LBB24_8
+; RV32-NEXT: or a4, t0, a4
+; RV32-NEXT: and a3, a7, a3
+; RV32-NEXT: and a5, a7, a0
+; RV32-NEXT: beq a1, a2, .LBB24_8
; RV32-NEXT: # %bb.7: # %entry
-; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: sltu a0, a2, a1
; RV32-NEXT: j .LBB24_9
; RV32-NEXT: .LBB24_8:
-; RV32-NEXT: snez a0, a3
+; RV32-NEXT: snez a0, a4
; RV32-NEXT: .LBB24_9: # %entry
-; RV32-NEXT: and a2, a2, a4
-; RV32-NEXT: li a5, -1
-; RV32-NEXT: beq a2, a5, .LBB24_11
+; RV32-NEXT: and a5, a5, a3
+; RV32-NEXT: li a2, -1
+; RV32-NEXT: beq a5, a2, .LBB24_11
; RV32-NEXT: # %bb.10: # %entry
-; RV32-NEXT: srli a4, a4, 31
-; RV32-NEXT: xori a0, a4, 1
+; RV32-NEXT: srli a3, a3, 31
+; RV32-NEXT: xori a0, a3, 1
; RV32-NEXT: .LBB24_11: # %entry
; RV32-NEXT: bnez a0, .LBB24_13
; RV32-NEXT: # %bb.12: # %entry
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: .LBB24_13: # %entry
; RV32-NEXT: neg a0, a0
-; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a0, a0, a4
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
@@ -1921,8 +1917,8 @@ define i64 @ustest_f16i64(half %x) {
; RV32-NEXT: # %bb.4: # %entry
; RV32-NEXT: li a0, 1
; RV32-NEXT: .LBB26_5: # %entry
-; RV32-NEXT: lw a4, 8(sp)
-; RV32-NEXT: lw a3, 12(sp)
+; RV32-NEXT: lw a3, 8(sp)
+; RV32-NEXT: lw a4, 12(sp)
; RV32-NEXT: and a5, a2, a1
; RV32-NEXT: beqz a5, .LBB26_7
; RV32-NEXT: # %bb.6: # %entry
@@ -1936,12 +1932,12 @@ define i64 @ustest_f16i64(half %x) {
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: bnez a0, .LBB26_10
; RV32-NEXT: # %bb.9:
-; RV32-NEXT: or a0, a4, a2
+; RV32-NEXT: or a0, a2, a4
; RV32-NEXT: snez a1, a0
; RV32-NEXT: .LBB26_10: # %entry
; RV32-NEXT: neg a1, a1
-; RV32-NEXT: and a0, a1, a4
-; RV32-NEXT: and a1, a1, a2
+; RV32-NEXT: and a0, a1, a2
+; RV32-NEXT: and a1, a1, a4
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
@@ -3027,24 +3023,23 @@ define i64 @stest_f64i64_mm(double %x) {
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
; RV32IF-NEXT: call __fixdfti
-; RV32IF-NEXT: lw a3, 8(sp)
; RV32IF-NEXT: lw a1, 12(sp)
-; RV32IF-NEXT: lw a2, 16(sp)
-; RV32IF-NEXT: lw a4, 20(sp)
-; RV32IF-NEXT: lui a0, 524288
-; RV32IF-NEXT: addi a5, a0, -1
+; RV32IF-NEXT: lw a0, 16(sp)
+; RV32IF-NEXT: lw a3, 20(sp)
+; RV32IF-NEXT: lw a4, 8(sp)
+; RV32IF-NEXT: lui a2, 524288
+; RV32IF-NEXT: addi a5, a2, -1
+; RV32IF-NEXT: or a7, a0, a3
; RV32IF-NEXT: beq a1, a5, .LBB45_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: sltu a6, a1, a5
-; RV32IF-NEXT: or a7, a2, a4
; RV32IF-NEXT: bnez a7, .LBB45_3
; RV32IF-NEXT: j .LBB45_4
; RV32IF-NEXT: .LBB45_2:
-; RV32IF-NEXT: sltiu a6, a3, -1
-; RV32IF-NEXT: or a7, a2, a4
+; RV32IF-NEXT: sltiu a6, a4, -1
; RV32IF-NEXT: beqz a7, .LBB45_4
; RV32IF-NEXT: .LBB45_3: # %entry
-; RV32IF-NEXT: srli a6, a4, 31
+; RV32IF-NEXT: srli a6, a3, 31
; RV32IF-NEXT: .LBB45_4: # %entry
; RV32IF-NEXT: neg a7, a6
; RV32IF-NEXT: addi t0, a6, -1
@@ -3052,29 +3047,29 @@ define i64 @stest_f64i64_mm(double %x) {
; RV32IF-NEXT: # %bb.5: # %entry
; RV32IF-NEXT: mv a1, a5
; RV32IF-NEXT: .LBB45_6: # %entry
-; RV32IF-NEXT: or a3, t0, a3
-; RV32IF-NEXT: and a4, a7, a4
-; RV32IF-NEXT: and a2, a7, a2
-; RV32IF-NEXT: beq a1, a0, .LBB45_8
+; RV32IF-NEXT: or a4, t0, a4
+; RV32IF-NEXT: and a3, a7, a3
+; RV32IF-NEXT: and a5, a7, a0
+; RV32IF-NEXT: beq a1, a2, .LBB45_8
; RV32IF-NEXT: # %bb.7: # %entry
-; RV32IF-NEXT: sltu a0, a0, a1
+; RV32IF-NEXT: sltu a0, a2, a1
; RV32IF-NEXT: j .LBB45_9
; RV32IF-NEXT: .LBB45_8:
-; RV32IF-NEXT: snez a0, a3
+; RV32IF-NEXT: snez a0, a4
; RV32IF-NEXT: .LBB45_9: # %entry
-; RV32IF-NEXT: and a2, a2, a4
-; RV32IF-NEXT: li a5, -1
-; RV32IF-NEXT: beq a2, a5, .LBB45_11
+; RV32IF-NEXT: and a5, a5, a3
+; RV32IF-NEXT: li a2, -1
+; RV32IF-NEXT: beq a5, a2, .LBB45_11
; RV32IF-NEXT: # %bb.10: # %entry
-; RV32IF-NEXT: srli a4, a4, 31
-; RV32IF-NEXT: xori a0, a4, 1
+; RV32IF-NEXT: srli a3, a3, 31
+; RV32IF-NEXT: xori a0, a3, 1
; RV32IF-NEXT: .LBB45_11: # %entry
; RV32IF-NEXT: bnez a0, .LBB45_13
; RV32IF-NEXT: # %bb.12: # %entry
; RV32IF-NEXT: lui a1, 524288
; RV32IF-NEXT: .LBB45_13: # %entry
; RV32IF-NEXT: neg a0, a0
-; RV32IF-NEXT: and a0, a0, a3
+; RV32IF-NEXT: and a0, a0, a4
; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 32
@@ -3130,24 +3125,23 @@ define i64 @stest_f64i64_mm(double %x) {
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
; RV32IFD-NEXT: call __fixdfti
-; RV32IFD-NEXT: lw a3, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
-; RV32IFD-NEXT: lw a2, 16(sp)
-; RV32IFD-NEXT: lw a4, 20(sp)
-; RV32IFD-NEXT: lui a0, 524288
-; RV32IFD-NEXT: addi a5, a0, -1
+; RV32IFD-NEXT: lw a0, 16(sp)
+; RV32IFD-NEXT: lw a3, 20(sp)
+; RV32IFD-NEXT: lw a4, 8(sp)
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: addi a5, a2, -1
+; RV32IFD-NEXT: or a7, a0, a3
; RV32IFD-NEXT: beq a1, a5, .LBB45_2
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: sltu a6, a1, a5
-; RV32IFD-NEXT: or a7, a2, a4
; RV32IFD-NEXT: bnez a7, .LBB45_3
; RV32IFD-NEXT: j .LBB45_4
; RV32IFD-NEXT: .LBB45_2:
-; RV32IFD-NEXT: sltiu a6, a3, -1
-; RV32IFD-NEXT: or a7, a2, a4
+; RV32IFD-NEXT: sltiu a6, a4, -1
; RV32IFD-NEXT: beqz a7, .LBB45_4
; RV32IFD-NEXT: .LBB45_3: # %entry
-; RV32IFD-NEXT: srli a6, a4, 31
+; RV32IFD-NEXT: srli a6, a3, 31
; RV32IFD-NEXT: .LBB45_4: # %entry
; RV32IFD-NEXT: neg a7, a6
; RV32IFD-NEXT: addi t0, a6, -1
@@ -3155,29 +3149,29 @@ define i64 @stest_f64i64_mm(double %x) {
; RV32IFD-NEXT: # %bb.5: # %entry
; RV32IFD-NEXT: mv a1, a5
; RV32IFD-NEXT: .LBB45_6: # %entry
-; RV32IFD-NEXT: or a3, t0, a3
-; RV32IFD-NEXT: and a4, a7, a4
-; RV32IFD-NEXT: and a2, a7, a2
-; RV32IFD-NEXT: beq a1, a0, .LBB45_8
+; RV32IFD-NEXT: or a4, t0, a4
+; RV32IFD-NEXT: and a3, a7, a3
+; RV32IFD-NEXT: and a5, a7, a0
+; RV32IFD-NEXT: beq a1, a2, .LBB45_8
; RV32IFD-NEXT: # %bb.7: # %entry
-; RV32IFD-NEXT: sltu a0, a0, a1
+; RV32IFD-NEXT: sltu a0, a2, a1
; RV32IFD-NEXT: j .LBB45_9
; RV32IFD-NEXT: .LBB45_8:
-; RV32IFD-NEXT: snez a0, a3
+; RV32IFD-NEXT: snez a0, a4
; RV32IFD-NEXT: .LBB45_9: # %entry
-; RV32IFD-NEXT: and a2, a2, a4
-; RV32IFD-NEXT: li a5, -1
-; RV32IFD-NEXT: beq a2, a5, .LBB45_11
+; RV32IFD-NEXT: and a5, a5, a3
+; RV32IFD-NEXT: li a2, -1
+; RV32IFD-NEXT: beq a5, a2, .LBB45_11
; RV32IFD-NEXT: # %bb.10: # %entry
-; RV32IFD-NEXT: srli a4, a4, 31
-; RV32IFD-NEXT: xori a0, a4, 1
+; RV32IFD-NEXT: srli a3, a3, 31
+; RV32IFD-NEXT: xori a0, a3, 1
; RV32IFD-NEXT: .LBB45_11: # %entry
; RV32IFD-NEXT: bnez a0, .LBB45_13
; RV32IFD-NEXT: # %bb.12: # %entry
; RV32IFD-NEXT: lui a1, 524288
; RV32IFD-NEXT: .LBB45_13: # %entry
; RV32IFD-NEXT: neg a0, a0
-; RV32IFD-NEXT: and a0, a0, a3
+; RV32IFD-NEXT: and a0, a0, a4
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: addi sp, sp, 32
@@ -3400,24 +3394,23 @@ define i64 @stest_f32i64_mm(float %x) {
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
-; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: lw a1, 12(sp)
-; RV32-NEXT: lw a2, 16(sp)
-; RV32-NEXT: lw a4, 20(sp)
-; RV32-NEXT: lui a0, 524288
-; RV32-NEXT: addi a5, a0, -1
+; RV32-NEXT: lw a0, 16(sp)
+; RV32-NEXT: lw a3, 20(sp)
+; RV32-NEXT: lw a4, 8(sp)
+; RV32-NEXT: lui a2, 524288
+; RV32-NEXT: addi a5, a2, -1
+; RV32-NEXT: or a7, a0, a3
; RV32-NEXT: beq a1, a5, .LBB48_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a6, a1, a5
-; RV32-NEXT: or a7, a2, a4
; RV32-NEXT: bnez a7, .LBB48_3
; RV32-NEXT: j .LBB48_4
; RV32-NEXT: .LBB48_2:
-; RV32-NEXT: sltiu a6, a3, -1
-; RV32-NEXT: or a7, a2, a4
+; RV32-NEXT: sltiu a6, a4, -1
; RV32-NEXT: beqz a7, .LBB48_4
; RV32-NEXT: .LBB48_3: # %entry
-; RV32-NEXT: srli a6, a4, 31
+; RV32-NEXT: srli a6, a3, 31
; RV32-NEXT: .LBB48_4: # %entry
; RV32-NEXT: neg a7, a6
; RV32-NEXT: addi t0, a6, -1
@@ -3425,29 +3418,29 @@ define i64 @stest_f32i64_mm(float %x) {
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: mv a1, a5
; RV32-NEXT: .LBB48_6: # %entry
-; RV32-NEXT: or a3, t0, a3
-; RV32-NEXT: and a4, a7, a4
-; RV32-NEXT: and a2, a7, a2
-; RV32-NEXT: beq a1, a0, .LBB48_8
+; RV32-NEXT: or a4, t0, a4
+; RV32-NEXT: and a3, a7, a3
+; RV32-NEXT: and a5, a7, a0
+; RV32-NEXT: beq a1, a2, .LBB48_8
; RV32-NEXT: # %bb.7: # %entry
-; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: sltu a0, a2, a1
; RV32-NEXT: j .LBB48_9
; RV32-NEXT: .LBB48_8:
-; RV32-NEXT: snez a0, a3
+; RV32-NEXT: snez a0, a4
; RV32-NEXT: .LBB48_9: # %entry
-; RV32-NEXT: and a2, a2, a4
-; RV32-NEXT: li a5, -1
-; RV32-NEXT: beq a2, a5, .LBB48_11
+; RV32-NEXT: and a5, a5, a3
+; RV32-NEXT: li a2, -1
+; RV32-NEXT: beq a5, a2, .LBB48_11
; RV32-NEXT: # %bb.10: # %entry
-; RV32-NEXT: srli a4, a4, 31
-; RV32-NEXT: xori a0, a4, 1
+; RV32-NEXT: srli a3, a3, 31
+; RV32-NEXT: xori a0, a3, 1
; RV32-NEXT: .LBB48_11: # %entry
; RV32-NEXT: bnez a0, .LBB48_13
; RV32-NEXT: # %bb.12: # %entry
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: .LBB48_13: # %entry
; RV32-NEXT: neg a0, a0
-; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a0, a0, a4
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
@@ -3601,24 +3594,23 @@ define i64 @stest_f16i64_mm(half %x) {
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
-; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: lw a1, 12(sp)
-; RV32-NEXT: lw a2, 16(sp)
-; RV32-NEXT: lw a4, 20(sp)
-; RV32-NEXT: lui a0, 524288
-; RV32-NEXT: addi a5, a0, -1
+; RV32-NEXT: lw a0, 16(sp)
+; RV32-NEXT: lw a3, 20(sp)
+; RV32-NEXT: lw a4, 8(sp)
+; RV32-NEXT: lui a2, 524288
+; RV32-NEXT: addi a5, a2, -1
+; RV32-NEXT: or a7, a0, a3
; RV32-NEXT: beq a1, a5, .LBB51_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a6, a1, a5
-; RV32-NEXT: or a7, a2, a4
; RV32-NEXT: bnez a7, .LBB51_3
; RV32-NEXT: j .LBB51_4
; RV32-NEXT: .LBB51_2:
-; RV32-NEXT: sltiu a6, a3, -1
-; RV32-NEXT: or a7, a2, a4
+; RV32-NEXT: sltiu a6, a4, -1
; RV32-NEXT: beqz a7, .LBB51_4
; RV32-NEXT: .LBB51_3: # %entry
-; RV32-NEXT: srli a6, a4, 31
+; RV32-NEXT: srli a6, a3, 31
; RV32-NEXT: .LBB51_4: # %entry
; RV32-NEXT: neg a7, a6
; RV32-NEXT: addi t0, a6, -1
@@ -3626,29 +3618,29 @@ define i64 @stest_f16i64_mm(half %x) {
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: mv a1, a5
; RV32-NEXT: .LBB51_6: # %entry
-; RV32-NEXT: or a3, t0, a3
-; RV32-NEXT: and a4, a7, a4
-; RV32-NEXT: and a2, a7, a2
-; RV32-NEXT: beq a1, a0, .LBB51_8
+; RV32-NEXT: or a4, t0, a4
+; RV32-NEXT: and a3, a7, a3
+; RV32-NEXT: and a5, a7, a0
+; RV32-NEXT: beq a1, a2, .LBB51_8
; RV32-NEXT: # %bb.7: # %entry
-; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: sltu a0, a2, a1
; RV32-NEXT: j .LBB51_9
; RV32-NEXT: .LBB51_8:
-; RV32-NEXT: snez a0, a3
+; RV32-NEXT: snez a0, a4
; RV32-NEXT: .LBB51_9: # %entry
-; RV32-NEXT: and a2, a2, a4
-; RV32-NEXT: li a5, -1
-; RV32-NEXT: beq a2, a5, .LBB51_11
+; RV32-NEXT: and a5, a5, a3
+; RV32-NEXT: li a2, -1
+; RV32-NEXT: beq a5, a2, .LBB51_11
; RV32-NEXT: # %bb.10: # %entry
-; RV32-NEXT: srli a4, a4, 31
-; RV32-NEXT: xori a0, a4, 1
+; RV32-NEXT: srli a3, a3, 31
+; RV32-NEXT: xori a0, a3, 1
; RV32-NEXT: .LBB51_11: # %entry
; RV32-NEXT: bnez a0, .LBB51_13
; RV32-NEXT: # %bb.12: # %entry
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: .LBB51_13: # %entry
; RV32-NEXT: neg a0, a0
-; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a0, a0, a4
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
diff --git a/llvm/test/CodeGen/RISCV/iabs.ll b/llvm/test/CodeGen/RISCV/iabs.ll
index c157c63722cb4..a570e261db6a6 100644
--- a/llvm/test/CodeGen/RISCV/iabs.ll
+++ b/llvm/test/CodeGen/RISCV/iabs.ll
@@ -308,11 +308,11 @@ define i128 @abs128(i128 %x) {
; RV32I-NEXT: bgez a2, .LBB8_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: neg a5, a1
-; RV32I-NEXT: snez a6, a4
-; RV32I-NEXT: snez a7, a3
+; RV32I-NEXT: or a6, a3, a4
; RV32I-NEXT: snez a1, a1
+; RV32I-NEXT: snez a7, a3
; RV32I-NEXT: neg a4, a4
-; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: snez a6, a6
; RV32I-NEXT: add a1, a2, a1
; RV32I-NEXT: sub a4, a4, a7
; RV32I-NEXT: sltu a2, a5, a6
@@ -336,11 +336,11 @@ define i128 @abs128(i128 %x) {
; RV32ZBB-NEXT: bgez a2, .LBB8_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: neg a5, a1
-; RV32ZBB-NEXT: snez a6, a4
-; RV32ZBB-NEXT: snez a7, a3
+; RV32ZBB-NEXT: or a6, a3, a4
; RV32ZBB-NEXT: snez a1, a1
+; RV32ZBB-NEXT: snez a7, a3
; RV32ZBB-NEXT: neg a4, a4
-; RV32ZBB-NEXT: or a6, a7, a6
+; RV32ZBB-NEXT: snez a6, a6
; RV32ZBB-NEXT: add a1, a2, a1
; RV32ZBB-NEXT: sub a4, a4, a7
; RV32ZBB-NEXT: sltu a2, a5, a6
@@ -390,11 +390,11 @@ define i128 @select_abs128(i128 %x) {
; RV32I-NEXT: bgez a2, .LBB9_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: neg a5, a1
-; RV32I-NEXT: snez a6, a4
-; RV32I-NEXT: snez a7, a3
+; RV32I-NEXT: or a6, a3, a4
; RV32I-NEXT: snez a1, a1
+; RV32I-NEXT: snez a7, a3
; RV32I-NEXT: neg a4, a4
-; RV32I-NEXT: or a6, a7, a6
+; RV32I-NEXT: snez a6, a6
; RV32I-NEXT: add a1, a2, a1
; RV32I-NEXT: sub a4, a4, a7
; RV32I-NEXT: sltu a2, a5, a6
@@ -418,11 +418,11 @@ define i128 @select_abs128(i128 %x) {
; RV32ZBB-NEXT: bgez a2, .LBB9_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: neg a5, a1
-; RV32ZBB-NEXT: snez a6, a4
-; RV32ZBB-NEXT: snez a7, a3
+; RV32ZBB-NEXT: or a6, a3, a4
; RV32ZBB-NEXT: snez a1, a1
+; RV32ZBB-NEXT: snez a7, a3
; RV32ZBB-NEXT: neg a4, a4
-; RV32ZBB-NEXT: or a6, a7, a6
+; RV32ZBB-NEXT: snez a6, a6
; RV32ZBB-NEXT: add a1, a2, a1
; RV32ZBB-NEXT: sub a4, a4, a7
; RV32ZBB-NEXT: sltu a2, a5, a6
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index a06c7505d543d..ba15985a446a1 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -109,20 +109,19 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
;
; RV64-LABEL: ctz_nxv8i1_no_range:
; RV64: # %bb.0:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV64-NEXT: vid.v v16
-; RV64-NEXT: li a1, -1
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64-NEXT: vmsne.vi v0, v8, 0
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vmadd.vx v16, a1, v8
-; RV64-NEXT: vmv.v.i v8, 0
-; RV64-NEXT: vmerge.vvm v8, v8, v16, v0
-; RV64-NEXT: vredmaxu.vs v8, v8, v8
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: sub a0, a0, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vmul.vx v8, v16, a0
+; RV64-NEXT: vmv.v.i v16, 0
+; RV64-NEXT: vadd.vx v16, v8, a1, v0.t
+; RV64-NEXT: vredmaxu.vs v8, v16, v16
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: sub a0, a1, a0
; RV64-NEXT: ret
%res = call i64 @llvm.experimental.cttz.elts.i64.nxv8i16(<vscale x 8 x i16> %a, i1 0)
ret i64 %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 32892bca84747..4f1d614b66778 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -7,10 +7,10 @@
; RUN: --check-prefixes=CHECK,CHECK64,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,CHECK32,ZVFHMIN
+; RUN: --check-prefixes=CHECK,CHECK32,ZVFHMIN,ZVFHMIN32
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,CHECK64,ZVFHMIN
+; RUN: --check-prefixes=CHECK,CHECK64,ZVFHMIN,ZVFHMIN64
declare <vscale x 1 x i1> @llvm.vp.fcmp.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x bfloat>, metadata, <vscale x 1 x i1>, i32)
@@ -1345,211 +1345,404 @@ define <vscale x 8 x i1> @fcmp_uno_vf_swap_nxv8bf16(<vscale x 8 x bfloat> %va, b
declare <vscale x 64 x i1> @llvm.vp.fcmp.nxv64bf16(<vscale x 64 x bfloat>, <vscale x 64 x bfloat>, metadata, <vscale x 64 x i1>, i32)
define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64bf16(<vscale x 64 x bfloat> %va, <vscale x 64 x bfloat> %vb, <vscale x 64 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: fcmp_oeq_vv_nxv64bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: mv a3, a1
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: add a3, a3, a1
-; CHECK-NEXT: slli a1, a1, 2
-; CHECK-NEXT: add a3, a3, a1
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: add a1, a1, a3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: mv a3, a1
-; CHECK-NEXT: slli a1, a1, 2
-; CHECK-NEXT: add a3, a3, a1
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: add a1, a1, a3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv8r.v v0, v16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: mv a3, a1
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, a1, a3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a1, a3, 3
-; CHECK-NEXT: slli a5, a3, 2
-; CHECK-NEXT: slli a4, a3, 1
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: sub a6, a2, a5
-; CHECK-NEXT: vl8re16.v v24, (a1)
-; CHECK-NEXT: sltu a1, a2, a6
-; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a6, a1, a6
-; CHECK-NEXT: sub a1, a6, a4
-; CHECK-NEXT: sltu a7, a6, a1
-; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a7, a7, a1
-; CHECK-NEXT: srli a1, a3, 1
-; CHECK-NEXT: srli a3, a3, 2
-; CHECK-NEXT: csrr t0, vlenb
-; CHECK-NEXT: slli t0, t0, 1
-; CHECK-NEXT: mv t1, t0
-; CHECK-NEXT: slli t0, t0, 2
-; CHECK-NEXT: add t1, t1, t0
-; CHECK-NEXT: slli t0, t0, 1
-; CHECK-NEXT: add t0, t0, t1
-; CHECK-NEXT: add t0, sp, t0
-; CHECK-NEXT: addi t0, t0, 16
-; CHECK-NEXT: vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
-; CHECK-NEXT: vslidedown.vx v16, v8, a1
-; CHECK-NEXT: vl8re16.v v8, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: mv t0, a0
-; CHECK-NEXT: slli a0, a0, 2
-; CHECK-NEXT: add a0, a0, t0
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v8, v16, a3
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; CHECK-NEXT: vsetvli zero, a7, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v28
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v4
-; CHECK-NEXT: bltu a6, a4, .LBB85_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a6, a4
-; CHECK-NEXT: .LBB85_2:
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmfeq.vv v5, v8, v16, v0.t
-; CHECK-NEXT: vsetvli zero, a6, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmfeq.vv v7, v24, v16, v0.t
-; CHECK-NEXT: bltu a2, a5, .LBB85_4
-; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: mv a2, a5
-; CHECK-NEXT: .LBB85_4:
-; CHECK-NEXT: sub a0, a2, a4
-; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 1
-; CHECK-NEXT: mv a6, a5
-; CHECK-NEXT: slli a5, a5, 2
-; CHECK-NEXT: add a6, a6, a5
-; CHECK-NEXT: slli a5, a5, 1
-; CHECK-NEXT: add a5, a5, a6
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vl1r.v v8, (a5) # vscale x 8-byte Folded Reload
-; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v8, a3
-; CHECK-NEXT: sltu a5, a2, a0
-; CHECK-NEXT: addi a5, a5, -1
-; CHECK-NEXT: and a0, a5, a0
-; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 1
-; CHECK-NEXT: mv a6, a5
-; CHECK-NEXT: slli a5, a5, 3
-; CHECK-NEXT: add a5, a5, a6
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v28
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: mv a5, a0
-; CHECK-NEXT: slli a0, a0, 2
-; CHECK-NEXT: add a0, a0, a5
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmfeq.vv v10, v16, v24, v0.t
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vx v9, v5, a3
-; CHECK-NEXT: bltu a2, a4, .LBB85_6
-; CHECK-NEXT: # %bb.5:
-; CHECK-NEXT: mv a2, a4
-; CHECK-NEXT: .LBB85_6:
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: mv a4, a0
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, a0, a4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: mv a2, a0
-; CHECK-NEXT: slli a0, a0, 2
-; CHECK-NEXT: add a0, a0, a2
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v0
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: mv a2, a0
-; CHECK-NEXT: slli a0, a0, 2
-; CHECK-NEXT: add a2, a2, a0
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add a0, a0, a2
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmfeq.vv v8, v16, v24, v0.t
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vx v8, v10, a3
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslideup.vx v8, v9, a1
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add a1, a1, a0
-; CHECK-NEXT: slli a0, a0, 2
-; CHECK-NEXT: add a1, a1, a0
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; CHECK32-LABEL: fcmp_oeq_vv_nxv64bf16:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: addi sp, sp, -16
+; CHECK32-NEXT: .cfi_def_cfa_offset 16
+; CHECK32-NEXT: csrr a1, vlenb
+; CHECK32-NEXT: mv a3, a1
+; CHECK32-NEXT: slli a1, a1, 1
+; CHECK32-NEXT: add a3, a3, a1
+; CHECK32-NEXT: slli a1, a1, 2
+; CHECK32-NEXT: add a3, a3, a1
+; CHECK32-NEXT: slli a1, a1, 1
+; CHECK32-NEXT: add a1, a1, a3
+; CHECK32-NEXT: sub sp, sp, a1
+; CHECK32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
+; CHECK32-NEXT: csrr a1, vlenb
+; CHECK32-NEXT: slli a1, a1, 1
+; CHECK32-NEXT: mv a3, a1
+; CHECK32-NEXT: slli a1, a1, 2
+; CHECK32-NEXT: add a3, a3, a1
+; CHECK32-NEXT: slli a1, a1, 1
+; CHECK32-NEXT: add a1, a1, a3
+; CHECK32-NEXT: add a1, sp, a1
+; CHECK32-NEXT: addi a1, a1, 16
+; CHECK32-NEXT: vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
+; CHECK32-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK32-NEXT: vmv8r.v v0, v16
+; CHECK32-NEXT: csrr a1, vlenb
+; CHECK32-NEXT: slli a1, a1, 1
+; CHECK32-NEXT: mv a3, a1
+; CHECK32-NEXT: slli a1, a1, 3
+; CHECK32-NEXT: add a1, a1, a3
+; CHECK32-NEXT: add a1, sp, a1
+; CHECK32-NEXT: addi a1, a1, 16
+; CHECK32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK32-NEXT: csrr a3, vlenb
+; CHECK32-NEXT: slli a1, a3, 3
+; CHECK32-NEXT: slli a5, a3, 2
+; CHECK32-NEXT: slli a4, a3, 1
+; CHECK32-NEXT: add a1, a0, a1
+; CHECK32-NEXT: sub a6, a2, a5
+; CHECK32-NEXT: vl8re16.v v24, (a1)
+; CHECK32-NEXT: sltu a1, a2, a6
+; CHECK32-NEXT: addi a1, a1, -1
+; CHECK32-NEXT: and a6, a1, a6
+; CHECK32-NEXT: sub a1, a6, a4
+; CHECK32-NEXT: sltu a7, a6, a1
+; CHECK32-NEXT: addi a7, a7, -1
+; CHECK32-NEXT: and a7, a7, a1
+; CHECK32-NEXT: srli a1, a3, 1
+; CHECK32-NEXT: srli a3, a3, 2
+; CHECK32-NEXT: csrr t0, vlenb
+; CHECK32-NEXT: slli t0, t0, 1
+; CHECK32-NEXT: mv t1, t0
+; CHECK32-NEXT: slli t0, t0, 2
+; CHECK32-NEXT: add t1, t1, t0
+; CHECK32-NEXT: slli t0, t0, 1
+; CHECK32-NEXT: add t0, t0, t1
+; CHECK32-NEXT: add t0, sp, t0
+; CHECK32-NEXT: addi t0, t0, 16
+; CHECK32-NEXT: vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
+; CHECK32-NEXT: vslidedown.vx v16, v8, a1
+; CHECK32-NEXT: vl8re16.v v8, (a0)
+; CHECK32-NEXT: csrr a0, vlenb
+; CHECK32-NEXT: slli a0, a0, 1
+; CHECK32-NEXT: mv t0, a0
+; CHECK32-NEXT: slli a0, a0, 2
+; CHECK32-NEXT: add a0, a0, t0
+; CHECK32-NEXT: add a0, sp, a0
+; CHECK32-NEXT: addi a0, a0, 16
+; CHECK32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; CHECK32-NEXT: csrr a0, vlenb
+; CHECK32-NEXT: add a0, sp, a0
+; CHECK32-NEXT: addi a0, a0, 16
+; CHECK32-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
+; CHECK32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK32-NEXT: vslidedown.vx v8, v16, a3
+; CHECK32-NEXT: addi a0, sp, 16
+; CHECK32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; CHECK32-NEXT: vsetvli zero, a7, e16, m4, ta, ma
+; CHECK32-NEXT: vfwcvtbf16.f.f.v v16, v28
+; CHECK32-NEXT: csrr a0, vlenb
+; CHECK32-NEXT: slli a0, a0, 1
+; CHECK32-NEXT: add a0, sp, a0
+; CHECK32-NEXT: addi a0, a0, 16
+; CHECK32-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
+; CHECK32-NEXT: vfwcvtbf16.f.f.v v8, v4
+; CHECK32-NEXT: bltu a6, a4, .LBB85_2
+; CHECK32-NEXT: # %bb.1:
+; CHECK32-NEXT: mv a6, a4
+; CHECK32-NEXT: .LBB85_2:
+; CHECK32-NEXT: addi a0, sp, 16
+; CHECK32-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; CHECK32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK32-NEXT: vmfeq.vv v5, v8, v16, v0.t
+; CHECK32-NEXT: vsetvli zero, a6, e16, m4, ta, ma
+; CHECK32-NEXT: vfwcvtbf16.f.f.v v16, v24
+; CHECK32-NEXT: csrr a0, vlenb
+; CHECK32-NEXT: slli a0, a0, 1
+; CHECK32-NEXT: add a0, sp, a0
+; CHECK32-NEXT: addi a0, a0, 16
+; CHECK32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK32-NEXT: vfwcvtbf16.f.f.v v24, v8
+; CHECK32-NEXT: csrr a0, vlenb
+; CHECK32-NEXT: add a0, sp, a0
+; CHECK32-NEXT: addi a0, a0, 16
+; CHECK32-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; CHECK32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK32-NEXT: vmfeq.vv v7, v24, v16, v0.t
+; CHECK32-NEXT: bltu a2, a5, .LBB85_4
+; CHECK32-NEXT: # %bb.3:
+; CHECK32-NEXT: mv a2, a5
+; CHECK32-NEXT: .LBB85_4:
+; CHECK32-NEXT: sub a0, a2, a4
+; CHECK32-NEXT: csrr a5, vlenb
+; CHECK32-NEXT: slli a5, a5, 1
+; CHECK32-NEXT: mv a6, a5
+; CHECK32-NEXT: slli a5, a5, 2
+; CHECK32-NEXT: add a6, a6, a5
+; CHECK32-NEXT: slli a5, a5, 1
+; CHECK32-NEXT: add a5, a5, a6
+; CHECK32-NEXT: add a5, sp, a5
+; CHECK32-NEXT: addi a5, a5, 16
+; CHECK32-NEXT: vl1r.v v8, (a5) # vscale x 8-byte Folded Reload
+; CHECK32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; CHECK32-NEXT: vslidedown.vx v0, v8, a3
+; CHECK32-NEXT: sltu a5, a2, a0
+; CHECK32-NEXT: addi a5, a5, -1
+; CHECK32-NEXT: and a0, a5, a0
+; CHECK32-NEXT: csrr a5, vlenb
+; CHECK32-NEXT: slli a5, a5, 1
+; CHECK32-NEXT: mv a6, a5
+; CHECK32-NEXT: slli a5, a5, 3
+; CHECK32-NEXT: add a5, a5, a6
+; CHECK32-NEXT: add a5, sp, a5
+; CHECK32-NEXT: addi a5, a5, 16
+; CHECK32-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; CHECK32-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK32-NEXT: vfwcvtbf16.f.f.v v16, v28
+; CHECK32-NEXT: csrr a0, vlenb
+; CHECK32-NEXT: slli a0, a0, 1
+; CHECK32-NEXT: mv a5, a0
+; CHECK32-NEXT: slli a0, a0, 2
+; CHECK32-NEXT: add a0, a0, a5
+; CHECK32-NEXT: add a0, sp, a0
+; CHECK32-NEXT: addi a0, a0, 16
+; CHECK32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK32-NEXT: vfwcvtbf16.f.f.v v24, v12
+; CHECK32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK32-NEXT: vmfeq.vv v10, v16, v24, v0.t
+; CHECK32-NEXT: vmv1r.v v9, v7
+; CHECK32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK32-NEXT: vslideup.vx v9, v5, a3
+; CHECK32-NEXT: bltu a2, a4, .LBB85_6
+; CHECK32-NEXT: # %bb.5:
+; CHECK32-NEXT: mv a2, a4
+; CHECK32-NEXT: .LBB85_6:
+; CHECK32-NEXT: csrr a0, vlenb
+; CHECK32-NEXT: slli a0, a0, 1
+; CHECK32-NEXT: mv a4, a0
+; CHECK32-NEXT: slli a0, a0, 3
+; CHECK32-NEXT: add a0, a0, a4
+; CHECK32-NEXT: add a0, sp, a0
+; CHECK32-NEXT: addi a0, a0, 16
+; CHECK32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; CHECK32-NEXT: vfwcvtbf16.f.f.v v16, v24
+; CHECK32-NEXT: csrr a0, vlenb
+; CHECK32-NEXT: slli a0, a0, 1
+; CHECK32-NEXT: mv a2, a0
+; CHECK32-NEXT: slli a0, a0, 2
+; CHECK32-NEXT: add a0, a0, a2
+; CHECK32-NEXT: add a0, sp, a0
+; CHECK32-NEXT: addi a0, a0, 16
+; CHECK32-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; CHECK32-NEXT: vfwcvtbf16.f.f.v v24, v0
+; CHECK32-NEXT: csrr a0, vlenb
+; CHECK32-NEXT: slli a0, a0, 1
+; CHECK32-NEXT: mv a2, a0
+; CHECK32-NEXT: slli a0, a0, 2
+; CHECK32-NEXT: add a2, a2, a0
+; CHECK32-NEXT: slli a0, a0, 1
+; CHECK32-NEXT: add a0, a0, a2
+; CHECK32-NEXT: add a0, sp, a0
+; CHECK32-NEXT: addi a0, a0, 16
+; CHECK32-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; CHECK32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK32-NEXT: vmfeq.vv v8, v16, v24, v0.t
+; CHECK32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK32-NEXT: vslideup.vx v8, v10, a3
+; CHECK32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK32-NEXT: vslideup.vx v8, v9, a1
+; CHECK32-NEXT: vmv.v.v v0, v8
+; CHECK32-NEXT: csrr a0, vlenb
+; CHECK32-NEXT: mv a1, a0
+; CHECK32-NEXT: slli a0, a0, 1
+; CHECK32-NEXT: add a1, a1, a0
+; CHECK32-NEXT: slli a0, a0, 2
+; CHECK32-NEXT: add a1, a1, a0
+; CHECK32-NEXT: slli a0, a0, 1
+; CHECK32-NEXT: add a0, a0, a1
+; CHECK32-NEXT: add sp, sp, a0
+; CHECK32-NEXT: .cfi_def_cfa sp, 16
+; CHECK32-NEXT: addi sp, sp, 16
+; CHECK32-NEXT: .cfi_def_cfa_offset 0
+; CHECK32-NEXT: ret
+;
+; CHECK64-LABEL: fcmp_oeq_vv_nxv64bf16:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: addi sp, sp, -16
+; CHECK64-NEXT: .cfi_def_cfa_offset 16
+; CHECK64-NEXT: csrr a1, vlenb
+; CHECK64-NEXT: mv a3, a1
+; CHECK64-NEXT: slli a1, a1, 1
+; CHECK64-NEXT: add a3, a3, a1
+; CHECK64-NEXT: slli a1, a1, 2
+; CHECK64-NEXT: add a3, a3, a1
+; CHECK64-NEXT: slli a1, a1, 1
+; CHECK64-NEXT: add a1, a1, a3
+; CHECK64-NEXT: sub sp, sp, a1
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
+; CHECK64-NEXT: csrr a1, vlenb
+; CHECK64-NEXT: slli a1, a1, 1
+; CHECK64-NEXT: mv a3, a1
+; CHECK64-NEXT: slli a1, a1, 3
+; CHECK64-NEXT: add a1, a1, a3
+; CHECK64-NEXT: add a1, sp, a1
+; CHECK64-NEXT: addi a1, a1, 16
+; CHECK64-NEXT: vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
+; CHECK64-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK64-NEXT: vmv8r.v v0, v16
+; CHECK64-NEXT: csrr a1, vlenb
+; CHECK64-NEXT: mv a3, a1
+; CHECK64-NEXT: slli a1, a1, 1
+; CHECK64-NEXT: add a3, a3, a1
+; CHECK64-NEXT: slli a1, a1, 3
+; CHECK64-NEXT: add a1, a1, a3
+; CHECK64-NEXT: add a1, sp, a1
+; CHECK64-NEXT: addi a1, a1, 16
+; CHECK64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK64-NEXT: csrr a3, vlenb
+; CHECK64-NEXT: slli a1, a3, 3
+; CHECK64-NEXT: slli a5, a3, 2
+; CHECK64-NEXT: slli a4, a3, 1
+; CHECK64-NEXT: add a1, a0, a1
+; CHECK64-NEXT: sub a6, a2, a5
+; CHECK64-NEXT: vl8re16.v v24, (a1)
+; CHECK64-NEXT: sltu a1, a2, a6
+; CHECK64-NEXT: addi a1, a1, -1
+; CHECK64-NEXT: and a6, a1, a6
+; CHECK64-NEXT: sub a1, a6, a4
+; CHECK64-NEXT: sltu a7, a6, a1
+; CHECK64-NEXT: addi a7, a7, -1
+; CHECK64-NEXT: and a7, a7, a1
+; CHECK64-NEXT: srli a1, a3, 1
+; CHECK64-NEXT: srli a3, a3, 2
+; CHECK64-NEXT: csrr t0, vlenb
+; CHECK64-NEXT: slli t0, t0, 1
+; CHECK64-NEXT: mv t1, t0
+; CHECK64-NEXT: slli t0, t0, 3
+; CHECK64-NEXT: add t0, t0, t1
+; CHECK64-NEXT: add t0, sp, t0
+; CHECK64-NEXT: addi t0, t0, 16
+; CHECK64-NEXT: vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
+; CHECK64-NEXT: vslidedown.vx v16, v8, a1
+; CHECK64-NEXT: vl8re16.v v8, (a0)
+; CHECK64-NEXT: csrr a0, vlenb
+; CHECK64-NEXT: slli a0, a0, 1
+; CHECK64-NEXT: mv t0, a0
+; CHECK64-NEXT: slli a0, a0, 2
+; CHECK64-NEXT: add a0, a0, t0
+; CHECK64-NEXT: add a0, sp, a0
+; CHECK64-NEXT: addi a0, a0, 16
+; CHECK64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; CHECK64-NEXT: csrr a0, vlenb
+; CHECK64-NEXT: add a0, sp, a0
+; CHECK64-NEXT: addi a0, a0, 16
+; CHECK64-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
+; CHECK64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK64-NEXT: vslidedown.vx v8, v16, a3
+; CHECK64-NEXT: addi a0, sp, 16
+; CHECK64-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; CHECK64-NEXT: vsetvli zero, a7, e16, m4, ta, ma
+; CHECK64-NEXT: vfwcvtbf16.f.f.v v16, v28
+; CHECK64-NEXT: csrr a0, vlenb
+; CHECK64-NEXT: slli a0, a0, 1
+; CHECK64-NEXT: add a0, sp, a0
+; CHECK64-NEXT: addi a0, a0, 16
+; CHECK64-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
+; CHECK64-NEXT: vfwcvtbf16.f.f.v v8, v4
+; CHECK64-NEXT: bltu a6, a4, .LBB85_2
+; CHECK64-NEXT: # %bb.1:
+; CHECK64-NEXT: mv a6, a4
+; CHECK64-NEXT: .LBB85_2:
+; CHECK64-NEXT: addi a0, sp, 16
+; CHECK64-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; CHECK64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK64-NEXT: vmfeq.vv v5, v8, v16, v0.t
+; CHECK64-NEXT: vsetvli zero, a6, e16, m4, ta, ma
+; CHECK64-NEXT: vfwcvtbf16.f.f.v v16, v24
+; CHECK64-NEXT: csrr a0, vlenb
+; CHECK64-NEXT: slli a0, a0, 1
+; CHECK64-NEXT: add a0, sp, a0
+; CHECK64-NEXT: addi a0, a0, 16
+; CHECK64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK64-NEXT: vfwcvtbf16.f.f.v v24, v8
+; CHECK64-NEXT: csrr a0, vlenb
+; CHECK64-NEXT: add a0, sp, a0
+; CHECK64-NEXT: addi a0, a0, 16
+; CHECK64-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; CHECK64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK64-NEXT: vmfeq.vv v6, v24, v16, v0.t
+; CHECK64-NEXT: bltu a2, a5, .LBB85_4
+; CHECK64-NEXT: # %bb.3:
+; CHECK64-NEXT: mv a2, a5
+; CHECK64-NEXT: .LBB85_4:
+; CHECK64-NEXT: sub a0, a2, a4
+; CHECK64-NEXT: csrr a5, vlenb
+; CHECK64-NEXT: slli a5, a5, 1
+; CHECK64-NEXT: mv a6, a5
+; CHECK64-NEXT: slli a5, a5, 3
+; CHECK64-NEXT: add a5, a5, a6
+; CHECK64-NEXT: add a5, sp, a5
+; CHECK64-NEXT: addi a5, a5, 16
+; CHECK64-NEXT: vl1r.v v7, (a5) # vscale x 8-byte Folded Reload
+; CHECK64-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; CHECK64-NEXT: vslidedown.vx v0, v7, a3
+; CHECK64-NEXT: sltu a5, a2, a0
+; CHECK64-NEXT: addi a5, a5, -1
+; CHECK64-NEXT: and a0, a5, a0
+; CHECK64-NEXT: csrr a5, vlenb
+; CHECK64-NEXT: mv a6, a5
+; CHECK64-NEXT: slli a5, a5, 1
+; CHECK64-NEXT: add a6, a6, a5
+; CHECK64-NEXT: slli a5, a5, 3
+; CHECK64-NEXT: add a5, a5, a6
+; CHECK64-NEXT: add a5, sp, a5
+; CHECK64-NEXT: addi a5, a5, 16
+; CHECK64-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; CHECK64-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK64-NEXT: vfwcvtbf16.f.f.v v16, v28
+; CHECK64-NEXT: csrr a0, vlenb
+; CHECK64-NEXT: slli a0, a0, 1
+; CHECK64-NEXT: mv a5, a0
+; CHECK64-NEXT: slli a0, a0, 2
+; CHECK64-NEXT: add a0, a0, a5
+; CHECK64-NEXT: add a0, sp, a0
+; CHECK64-NEXT: addi a0, a0, 16
+; CHECK64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK64-NEXT: vfwcvtbf16.f.f.v v24, v12
+; CHECK64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK64-NEXT: vmfeq.vv v4, v16, v24, v0.t
+; CHECK64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK64-NEXT: vslideup.vx v6, v5, a3
+; CHECK64-NEXT: bltu a2, a4, .LBB85_6
+; CHECK64-NEXT: # %bb.5:
+; CHECK64-NEXT: mv a2, a4
+; CHECK64-NEXT: .LBB85_6:
+; CHECK64-NEXT: csrr a0, vlenb
+; CHECK64-NEXT: mv a4, a0
+; CHECK64-NEXT: slli a0, a0, 1
+; CHECK64-NEXT: add a4, a4, a0
+; CHECK64-NEXT: slli a0, a0, 3
+; CHECK64-NEXT: add a0, a0, a4
+; CHECK64-NEXT: add a0, sp, a0
+; CHECK64-NEXT: addi a0, a0, 16
+; CHECK64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; CHECK64-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; CHECK64-NEXT: vfwcvtbf16.f.f.v v16, v24
+; CHECK64-NEXT: csrr a0, vlenb
+; CHECK64-NEXT: slli a0, a0, 1
+; CHECK64-NEXT: mv a2, a0
+; CHECK64-NEXT: slli a0, a0, 2
+; CHECK64-NEXT: add a0, a0, a2
+; CHECK64-NEXT: add a0, sp, a0
+; CHECK64-NEXT: addi a0, a0, 16
+; CHECK64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; CHECK64-NEXT: vfwcvtbf16.f.f.v v24, v8
+; CHECK64-NEXT: vmv1r.v v0, v7
+; CHECK64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK64-NEXT: vmfeq.vv v8, v16, v24, v0.t
+; CHECK64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK64-NEXT: vslideup.vx v8, v4, a3
+; CHECK64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK64-NEXT: vslideup.vx v8, v6, a1
+; CHECK64-NEXT: vmv.v.v v0, v8
+; CHECK64-NEXT: csrr a0, vlenb
+; CHECK64-NEXT: mv a1, a0
+; CHECK64-NEXT: slli a0, a0, 1
+; CHECK64-NEXT: add a1, a1, a0
+; CHECK64-NEXT: slli a0, a0, 2
+; CHECK64-NEXT: add a1, a1, a0
+; CHECK64-NEXT: slli a0, a0, 1
+; CHECK64-NEXT: add a0, a0, a1
+; CHECK64-NEXT: add sp, sp, a0
+; CHECK64-NEXT: .cfi_def_cfa sp, 16
+; CHECK64-NEXT: addi sp, sp, 16
+; CHECK64-NEXT: .cfi_def_cfa_offset 0
+; CHECK64-NEXT: ret
%v = call <vscale x 64 x i1> @llvm.vp.fcmp.nxv64bf16(<vscale x 64 x bfloat> %va, <vscale x 64 x bfloat> %vb, metadata !"oeq", <vscale x 64 x i1> %m, i32 %evl)
ret <vscale x 64 x i1> %v
}
@@ -3541,211 +3734,404 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: .cfi_def_cfa_offset 0
; ZVFH-NEXT: ret
;
-; ZVFHMIN-LABEL: fcmp_oeq_vv_nxv64f16:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: addi sp, sp, -16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: mv a3, a1
-; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: add a3, a3, a1
-; ZVFHMIN-NEXT: slli a1, a1, 2
-; ZVFHMIN-NEXT: add a3, a3, a1
-; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: add a1, a1, a3
-; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: mv a3, a1
-; ZVFHMIN-NEXT: slli a1, a1, 2
-; ZVFHMIN-NEXT: add a3, a3, a1
-; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: add a1, a1, a3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; ZVFHMIN-NEXT: vmv8r.v v0, v16
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: mv a3, a1
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, a1, a3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a1, a3, 3
-; ZVFHMIN-NEXT: slli a5, a3, 2
-; ZVFHMIN-NEXT: slli a4, a3, 1
-; ZVFHMIN-NEXT: add a1, a0, a1
-; ZVFHMIN-NEXT: sub a6, a2, a5
-; ZVFHMIN-NEXT: vl8re16.v v24, (a1)
-; ZVFHMIN-NEXT: sltu a1, a2, a6
-; ZVFHMIN-NEXT: addi a1, a1, -1
-; ZVFHMIN-NEXT: and a6, a1, a6
-; ZVFHMIN-NEXT: sub a1, a6, a4
-; ZVFHMIN-NEXT: sltu a7, a6, a1
-; ZVFHMIN-NEXT: addi a7, a7, -1
-; ZVFHMIN-NEXT: and a7, a7, a1
-; ZVFHMIN-NEXT: srli a1, a3, 1
-; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: csrr t0, vlenb
-; ZVFHMIN-NEXT: slli t0, t0, 1
-; ZVFHMIN-NEXT: mv t1, t0
-; ZVFHMIN-NEXT: slli t0, t0, 2
-; ZVFHMIN-NEXT: add t1, t1, t0
-; ZVFHMIN-NEXT: slli t0, t0, 1
-; ZVFHMIN-NEXT: add t0, t0, t1
-; ZVFHMIN-NEXT: add t0, sp, t0
-; ZVFHMIN-NEXT: addi t0, t0, 16
-; ZVFHMIN-NEXT: vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
-; ZVFHMIN-NEXT: vslidedown.vx v16, v8, a1
-; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: mv t0, a0
-; ZVFHMIN-NEXT: slli a0, a0, 2
-; ZVFHMIN-NEXT: add a0, a0, t0
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v8, v16, a3
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a7, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
-; ZVFHMIN-NEXT: bltu a6, a4, .LBB171_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a6, a4
-; ZVFHMIN-NEXT: .LBB171_2:
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v5, v8, v16, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, a6, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v7, v24, v16, v0.t
-; ZVFHMIN-NEXT: bltu a2, a5, .LBB171_4
-; ZVFHMIN-NEXT: # %bb.3:
-; ZVFHMIN-NEXT: mv a2, a5
-; ZVFHMIN-NEXT: .LBB171_4:
-; ZVFHMIN-NEXT: sub a0, a2, a4
-; ZVFHMIN-NEXT: csrr a5, vlenb
-; ZVFHMIN-NEXT: slli a5, a5, 1
-; ZVFHMIN-NEXT: mv a6, a5
-; ZVFHMIN-NEXT: slli a5, a5, 2
-; ZVFHMIN-NEXT: add a6, a6, a5
-; ZVFHMIN-NEXT: slli a5, a5, 1
-; ZVFHMIN-NEXT: add a5, a5, a6
-; ZVFHMIN-NEXT: add a5, sp, a5
-; ZVFHMIN-NEXT: addi a5, a5, 16
-; ZVFHMIN-NEXT: vl1r.v v8, (a5) # vscale x 8-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3
-; ZVFHMIN-NEXT: sltu a5, a2, a0
-; ZVFHMIN-NEXT: addi a5, a5, -1
-; ZVFHMIN-NEXT: and a0, a5, a0
-; ZVFHMIN-NEXT: csrr a5, vlenb
-; ZVFHMIN-NEXT: slli a5, a5, 1
-; ZVFHMIN-NEXT: mv a6, a5
-; ZVFHMIN-NEXT: slli a5, a5, 3
-; ZVFHMIN-NEXT: add a5, a5, a6
-; ZVFHMIN-NEXT: add a5, sp, a5
-; ZVFHMIN-NEXT: addi a5, a5, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: mv a5, a0
-; ZVFHMIN-NEXT: slli a0, a0, 2
-; ZVFHMIN-NEXT: add a0, a0, a5
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v10, v16, v24, v0.t
-; ZVFHMIN-NEXT: vmv1r.v v9, v7
-; ZVFHMIN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslideup.vx v9, v5, a3
-; ZVFHMIN-NEXT: bltu a2, a4, .LBB171_6
-; ZVFHMIN-NEXT: # %bb.5:
-; ZVFHMIN-NEXT: mv a2, a4
-; ZVFHMIN-NEXT: .LBB171_6:
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: mv a4, a0
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, a0, a4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: mv a2, a0
-; ZVFHMIN-NEXT: slli a0, a0, 2
-; ZVFHMIN-NEXT: add a0, a0, a2
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: mv a2, a0
-; ZVFHMIN-NEXT: slli a0, a0, 2
-; ZVFHMIN-NEXT: add a2, a2, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a2
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslideup.vx v8, v10, a3
-; ZVFHMIN-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; ZVFHMIN-NEXT: vslideup.vx v8, v9, a1
-; ZVFHMIN-NEXT: vmv.v.v v0, v8
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a1, a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 2
-; ZVFHMIN-NEXT: add a1, a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a1
-; ZVFHMIN-NEXT: add sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT: addi sp, sp, 16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT: ret
+; ZVFHMIN32-LABEL: fcmp_oeq_vv_nxv64f16:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: addi sp, sp, -16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT: csrr a1, vlenb
+; ZVFHMIN32-NEXT: mv a3, a1
+; ZVFHMIN32-NEXT: slli a1, a1, 1
+; ZVFHMIN32-NEXT: add a3, a3, a1
+; ZVFHMIN32-NEXT: slli a1, a1, 2
+; ZVFHMIN32-NEXT: add a3, a3, a1
+; ZVFHMIN32-NEXT: slli a1, a1, 1
+; ZVFHMIN32-NEXT: add a1, a1, a3
+; ZVFHMIN32-NEXT: sub sp, sp, a1
+; ZVFHMIN32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
+; ZVFHMIN32-NEXT: csrr a1, vlenb
+; ZVFHMIN32-NEXT: slli a1, a1, 1
+; ZVFHMIN32-NEXT: mv a3, a1
+; ZVFHMIN32-NEXT: slli a1, a1, 2
+; ZVFHMIN32-NEXT: add a3, a3, a1
+; ZVFHMIN32-NEXT: slli a1, a1, 1
+; ZVFHMIN32-NEXT: add a1, a1, a3
+; ZVFHMIN32-NEXT: add a1, sp, a1
+; ZVFHMIN32-NEXT: addi a1, a1, 16
+; ZVFHMIN32-NEXT: vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; ZVFHMIN32-NEXT: vmv8r.v v0, v16
+; ZVFHMIN32-NEXT: csrr a1, vlenb
+; ZVFHMIN32-NEXT: slli a1, a1, 1
+; ZVFHMIN32-NEXT: mv a3, a1
+; ZVFHMIN32-NEXT: slli a1, a1, 3
+; ZVFHMIN32-NEXT: add a1, a1, a3
+; ZVFHMIN32-NEXT: add a1, sp, a1
+; ZVFHMIN32-NEXT: addi a1, a1, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a3, vlenb
+; ZVFHMIN32-NEXT: slli a1, a3, 3
+; ZVFHMIN32-NEXT: slli a5, a3, 2
+; ZVFHMIN32-NEXT: slli a4, a3, 1
+; ZVFHMIN32-NEXT: add a1, a0, a1
+; ZVFHMIN32-NEXT: sub a6, a2, a5
+; ZVFHMIN32-NEXT: vl8re16.v v24, (a1)
+; ZVFHMIN32-NEXT: sltu a1, a2, a6
+; ZVFHMIN32-NEXT: addi a1, a1, -1
+; ZVFHMIN32-NEXT: and a6, a1, a6
+; ZVFHMIN32-NEXT: sub a1, a6, a4
+; ZVFHMIN32-NEXT: sltu a7, a6, a1
+; ZVFHMIN32-NEXT: addi a7, a7, -1
+; ZVFHMIN32-NEXT: and a7, a7, a1
+; ZVFHMIN32-NEXT: srli a1, a3, 1
+; ZVFHMIN32-NEXT: srli a3, a3, 2
+; ZVFHMIN32-NEXT: csrr t0, vlenb
+; ZVFHMIN32-NEXT: slli t0, t0, 1
+; ZVFHMIN32-NEXT: mv t1, t0
+; ZVFHMIN32-NEXT: slli t0, t0, 2
+; ZVFHMIN32-NEXT: add t1, t1, t0
+; ZVFHMIN32-NEXT: slli t0, t0, 1
+; ZVFHMIN32-NEXT: add t0, t0, t1
+; ZVFHMIN32-NEXT: add t0, sp, t0
+; ZVFHMIN32-NEXT: addi t0, t0, 16
+; ZVFHMIN32-NEXT: vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
+; ZVFHMIN32-NEXT: vslidedown.vx v16, v8, a1
+; ZVFHMIN32-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: mv t0, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 2
+; ZVFHMIN32-NEXT: add a0, a0, t0
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslidedown.vx v8, v16, a3
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli zero, a7, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v28
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v4
+; ZVFHMIN32-NEXT: bltu a6, a4, .LBB171_2
+; ZVFHMIN32-NEXT: # %bb.1:
+; ZVFHMIN32-NEXT: mv a6, a4
+; ZVFHMIN32-NEXT: .LBB171_2:
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vmfeq.vv v5, v8, v16, v0.t
+; ZVFHMIN32-NEXT: vsetvli zero, a6, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vmfeq.vv v7, v24, v16, v0.t
+; ZVFHMIN32-NEXT: bltu a2, a5, .LBB171_4
+; ZVFHMIN32-NEXT: # %bb.3:
+; ZVFHMIN32-NEXT: mv a2, a5
+; ZVFHMIN32-NEXT: .LBB171_4:
+; ZVFHMIN32-NEXT: sub a0, a2, a4
+; ZVFHMIN32-NEXT: csrr a5, vlenb
+; ZVFHMIN32-NEXT: slli a5, a5, 1
+; ZVFHMIN32-NEXT: mv a6, a5
+; ZVFHMIN32-NEXT: slli a5, a5, 2
+; ZVFHMIN32-NEXT: add a6, a6, a5
+; ZVFHMIN32-NEXT: slli a5, a5, 1
+; ZVFHMIN32-NEXT: add a5, a5, a6
+; ZVFHMIN32-NEXT: add a5, sp, a5
+; ZVFHMIN32-NEXT: addi a5, a5, 16
+; ZVFHMIN32-NEXT: vl1r.v v8, (a5) # vscale x 8-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslidedown.vx v0, v8, a3
+; ZVFHMIN32-NEXT: sltu a5, a2, a0
+; ZVFHMIN32-NEXT: addi a5, a5, -1
+; ZVFHMIN32-NEXT: and a0, a5, a0
+; ZVFHMIN32-NEXT: csrr a5, vlenb
+; ZVFHMIN32-NEXT: slli a5, a5, 1
+; ZVFHMIN32-NEXT: mv a6, a5
+; ZVFHMIN32-NEXT: slli a5, a5, 3
+; ZVFHMIN32-NEXT: add a5, a5, a6
+; ZVFHMIN32-NEXT: add a5, sp, a5
+; ZVFHMIN32-NEXT: addi a5, a5, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v28
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: mv a5, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 2
+; ZVFHMIN32-NEXT: add a0, a0, a5
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vmfeq.vv v10, v16, v24, v0.t
+; ZVFHMIN32-NEXT: vmv1r.v v9, v7
+; ZVFHMIN32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslideup.vx v9, v5, a3
+; ZVFHMIN32-NEXT: bltu a2, a4, .LBB171_6
+; ZVFHMIN32-NEXT: # %bb.5:
+; ZVFHMIN32-NEXT: mv a2, a4
+; ZVFHMIN32-NEXT: .LBB171_6:
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: mv a4, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: add a0, a0, a4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: mv a2, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 2
+; ZVFHMIN32-NEXT: add a0, a0, a2
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: mv a2, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 2
+; ZVFHMIN32-NEXT: add a2, a2, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, a0, a2
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vmfeq.vv v8, v16, v24, v0.t
+; ZVFHMIN32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslideup.vx v8, v10, a3
+; ZVFHMIN32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVFHMIN32-NEXT: vslideup.vx v8, v9, a1
+; ZVFHMIN32-NEXT: vmv.v.v v0, v8
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a1, a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 2
+; ZVFHMIN32-NEXT: add a1, a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, a0, a1
+; ZVFHMIN32-NEXT: add sp, sp, a0
+; ZVFHMIN32-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT: addi sp, sp, 16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT: ret
+;
+; ZVFHMIN64-LABEL: fcmp_oeq_vv_nxv64f16:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: addi sp, sp, -16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT: csrr a1, vlenb
+; ZVFHMIN64-NEXT: mv a3, a1
+; ZVFHMIN64-NEXT: slli a1, a1, 1
+; ZVFHMIN64-NEXT: add a3, a3, a1
+; ZVFHMIN64-NEXT: slli a1, a1, 2
+; ZVFHMIN64-NEXT: add a3, a3, a1
+; ZVFHMIN64-NEXT: slli a1, a1, 1
+; ZVFHMIN64-NEXT: add a1, a1, a3
+; ZVFHMIN64-NEXT: sub sp, sp, a1
+; ZVFHMIN64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 27 * vlenb
+; ZVFHMIN64-NEXT: csrr a1, vlenb
+; ZVFHMIN64-NEXT: slli a1, a1, 1
+; ZVFHMIN64-NEXT: mv a3, a1
+; ZVFHMIN64-NEXT: slli a1, a1, 3
+; ZVFHMIN64-NEXT: add a1, a1, a3
+; ZVFHMIN64-NEXT: add a1, sp, a1
+; ZVFHMIN64-NEXT: addi a1, a1, 16
+; ZVFHMIN64-NEXT: vs1r.v v0, (a1) # vscale x 8-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; ZVFHMIN64-NEXT: vmv8r.v v0, v16
+; ZVFHMIN64-NEXT: csrr a1, vlenb
+; ZVFHMIN64-NEXT: mv a3, a1
+; ZVFHMIN64-NEXT: slli a1, a1, 1
+; ZVFHMIN64-NEXT: add a3, a3, a1
+; ZVFHMIN64-NEXT: slli a1, a1, 3
+; ZVFHMIN64-NEXT: add a1, a1, a3
+; ZVFHMIN64-NEXT: add a1, sp, a1
+; ZVFHMIN64-NEXT: addi a1, a1, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a3, vlenb
+; ZVFHMIN64-NEXT: slli a1, a3, 3
+; ZVFHMIN64-NEXT: slli a5, a3, 2
+; ZVFHMIN64-NEXT: slli a4, a3, 1
+; ZVFHMIN64-NEXT: add a1, a0, a1
+; ZVFHMIN64-NEXT: sub a6, a2, a5
+; ZVFHMIN64-NEXT: vl8re16.v v24, (a1)
+; ZVFHMIN64-NEXT: sltu a1, a2, a6
+; ZVFHMIN64-NEXT: addi a1, a1, -1
+; ZVFHMIN64-NEXT: and a6, a1, a6
+; ZVFHMIN64-NEXT: sub a1, a6, a4
+; ZVFHMIN64-NEXT: sltu a7, a6, a1
+; ZVFHMIN64-NEXT: addi a7, a7, -1
+; ZVFHMIN64-NEXT: and a7, a7, a1
+; ZVFHMIN64-NEXT: srli a1, a3, 1
+; ZVFHMIN64-NEXT: srli a3, a3, 2
+; ZVFHMIN64-NEXT: csrr t0, vlenb
+; ZVFHMIN64-NEXT: slli t0, t0, 1
+; ZVFHMIN64-NEXT: mv t1, t0
+; ZVFHMIN64-NEXT: slli t0, t0, 3
+; ZVFHMIN64-NEXT: add t0, t0, t1
+; ZVFHMIN64-NEXT: add t0, sp, t0
+; ZVFHMIN64-NEXT: addi t0, t0, 16
+; ZVFHMIN64-NEXT: vl1r.v v8, (t0) # vscale x 8-byte Folded Reload
+; ZVFHMIN64-NEXT: vslidedown.vx v16, v8, a1
+; ZVFHMIN64-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: mv t0, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 2
+; ZVFHMIN64-NEXT: add a0, a0, t0
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vs1r.v v16, (a0) # vscale x 8-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslidedown.vx v8, v16, a3
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli zero, a7, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v28
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v4
+; ZVFHMIN64-NEXT: bltu a6, a4, .LBB171_2
+; ZVFHMIN64-NEXT: # %bb.1:
+; ZVFHMIN64-NEXT: mv a6, a4
+; ZVFHMIN64-NEXT: .LBB171_2:
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vmfeq.vv v5, v8, v16, v0.t
+; ZVFHMIN64-NEXT: vsetvli zero, a6, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl1r.v v0, (a0) # vscale x 8-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vmfeq.vv v6, v24, v16, v0.t
+; ZVFHMIN64-NEXT: bltu a2, a5, .LBB171_4
+; ZVFHMIN64-NEXT: # %bb.3:
+; ZVFHMIN64-NEXT: mv a2, a5
+; ZVFHMIN64-NEXT: .LBB171_4:
+; ZVFHMIN64-NEXT: sub a0, a2, a4
+; ZVFHMIN64-NEXT: csrr a5, vlenb
+; ZVFHMIN64-NEXT: slli a5, a5, 1
+; ZVFHMIN64-NEXT: mv a6, a5
+; ZVFHMIN64-NEXT: slli a5, a5, 3
+; ZVFHMIN64-NEXT: add a5, a5, a6
+; ZVFHMIN64-NEXT: add a5, sp, a5
+; ZVFHMIN64-NEXT: addi a5, a5, 16
+; ZVFHMIN64-NEXT: vl1r.v v7, (a5) # vscale x 8-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslidedown.vx v0, v7, a3
+; ZVFHMIN64-NEXT: sltu a5, a2, a0
+; ZVFHMIN64-NEXT: addi a5, a5, -1
+; ZVFHMIN64-NEXT: and a0, a5, a0
+; ZVFHMIN64-NEXT: csrr a5, vlenb
+; ZVFHMIN64-NEXT: mv a6, a5
+; ZVFHMIN64-NEXT: slli a5, a5, 1
+; ZVFHMIN64-NEXT: add a6, a6, a5
+; ZVFHMIN64-NEXT: slli a5, a5, 3
+; ZVFHMIN64-NEXT: add a5, a5, a6
+; ZVFHMIN64-NEXT: add a5, sp, a5
+; ZVFHMIN64-NEXT: addi a5, a5, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v28
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: mv a5, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 2
+; ZVFHMIN64-NEXT: add a0, a0, a5
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vmfeq.vv v4, v16, v24, v0.t
+; ZVFHMIN64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslideup.vx v6, v5, a3
+; ZVFHMIN64-NEXT: bltu a2, a4, .LBB171_6
+; ZVFHMIN64-NEXT: # %bb.5:
+; ZVFHMIN64-NEXT: mv a2, a4
+; ZVFHMIN64-NEXT: .LBB171_6:
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: mv a4, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a4, a4, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, a0, a4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: mv a2, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 2
+; ZVFHMIN64-NEXT: add a0, a0, a2
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN64-NEXT: vmv1r.v v0, v7
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vmfeq.vv v8, v16, v24, v0.t
+; ZVFHMIN64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslideup.vx v8, v4, a3
+; ZVFHMIN64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVFHMIN64-NEXT: vslideup.vx v8, v6, a1
+; ZVFHMIN64-NEXT: vmv.v.v v0, v8
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a1, a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 2
+; ZVFHMIN64-NEXT: add a1, a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, a0, a1
+; ZVFHMIN64-NEXT: add sp, sp, a0
+; ZVFHMIN64-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT: addi sp, sp, 16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT: ret
%v = call <vscale x 64 x i1> @llvm.vp.fcmp.nxv64f16(<vscale x 64 x half> %va, <vscale x 64 x half> %vb, metadata !"oeq", <vscale x 64 x i1> %m, i32 %evl)
ret <vscale x 64 x i1> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index 728fa07a7d4e5..33fe6e10e915d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -7,10 +7,10 @@
; RUN: --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN32
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN64
declare <vscale x 1 x bfloat> @llvm.vp.fma.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x bfloat>, <vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
@@ -8786,1031 +8786,965 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFH-NEXT: vmv.v.v v8, v16
; ZVFH-NEXT: ret
;
-; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: addi sp, sp, -16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 5
-; ZVFHMIN-NEXT: sub sp, sp, a2
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: lui a2, 8
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vxor.vx v8, v16, a2, v0.t
-; ZVFHMIN-NEXT: slli a0, a3, 1
-; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a4, a1, a0
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
-; ZVFHMIN-NEXT: sltu a3, a1, a4
-; ZVFHMIN-NEXT: csrr a5, vlenb
-; ZVFHMIN-NEXT: slli a5, a5, 4
-; ZVFHMIN-NEXT: add a5, sp, a5
-; ZVFHMIN-NEXT: addi a5, a5, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v24, v24, a2, v0.t
-; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a3, a3, a4
-; ZVFHMIN-NEXT: vmv1r.v v0, v6
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 4
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24, v0.t
-; ZVFHMIN-NEXT: bltu a1, a0, .LBB286_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: .LBB286_2:
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t
-; ZVFHMIN-NEXT: vmv4r.v v12, v4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT: addi sp, sp, 16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT: ret
- %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
- %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
- ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vv_nxv32f16_commuted:
-; ZVFH: # %bb.0:
-; ZVFH-NEXT: vl8re16.v v24, (a0)
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vv v8, v16, v24, v0.t
-; ZVFH-NEXT: ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16_commuted:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: addi sp, sp, -16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 5
-; ZVFHMIN-NEXT: sub sp, sp, a2
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
-; ZVFHMIN-NEXT: lui a2, 8
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a2, v0.t
-; ZVFHMIN-NEXT: slli a0, a3, 1
-; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a4, a1, a0
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
-; ZVFHMIN-NEXT: sltu a3, a1, a4
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v8, v8, a2, v0.t
-; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a3, a3, a4
-; ZVFHMIN-NEXT: vmv1r.v v0, v6
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 4
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16, v0.t
-; ZVFHMIN-NEXT: bltu a1, a0, .LBB287_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: .LBB287_2:
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t
-; ZVFHMIN-NEXT: vmv.v.v v16, v8
-; ZVFHMIN-NEXT: vmv4r.v v12, v4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT: addi sp, sp, 16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT: ret
- %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
- %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
- ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vv_nxv32f16_unmasked:
-; ZVFH: # %bb.0:
-; ZVFH-NEXT: vl8re16.v v24, (a0)
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vv v8, v16, v24
-; ZVFH-NEXT: ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16_unmasked:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: addi sp, sp, -16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 5
-; ZVFHMIN-NEXT: sub sp, sp, a2
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
-; ZVFHMIN-NEXT: lui a2, 8
-; ZVFHMIN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v8
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a2
-; ZVFHMIN-NEXT: slli a0, a3, 1
-; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a4, a1, a0
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3
-; ZVFHMIN-NEXT: sltu a3, a1, a4
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v8, v24, a2
-; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a3, a3, a4
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 4
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT: bltu a1, a0, .LBB288_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: .LBB288_2:
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT: addi sp, sp, 16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT: ret
- %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted:
-; ZVFH: # %bb.0:
-; ZVFH-NEXT: vl8re16.v v24, (a0)
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vv v8, v16, v24
-; ZVFH-NEXT: ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: addi sp, sp, -16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 5
-; ZVFHMIN-NEXT: sub sp, sp, a2
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
-; ZVFHMIN-NEXT: lui a2, 8
-; ZVFHMIN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v8
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a2
-; ZVFHMIN-NEXT: slli a0, a3, 1
-; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a4, a1, a0
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3
-; ZVFHMIN-NEXT: sltu a3, a1, a4
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v8, v24, a2
-; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a3, a3, a4
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 4
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16, v0.t
-; ZVFHMIN-NEXT: bltu a1, a0, .LBB289_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: .LBB289_2:
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT: addi sp, sp, 16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT: ret
- %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16:
-; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16, v0.t
-; ZVFH-NEXT: ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: addi sp, sp, -16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 5
-; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
-; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: lui a2, 8
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vmv.v.x v24, a1
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: mv a4, a1
-; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: add a1, a1, a4
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v8, v8, a2, v0.t
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a2, v0.t
-; ZVFHMIN-NEXT: slli a1, a3, 1
-; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a2, a0, a1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a3
-; ZVFHMIN-NEXT: sltu a3, a0, a2
-; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a2, a3, a2
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a3, a3, 3
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 4
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16, v0.t
-; ZVFHMIN-NEXT: bltu a0, a1, .LBB290_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a0, a1
-; ZVFHMIN-NEXT: .LBB290_2:
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16, v0.t
-; ZVFHMIN-NEXT: vmv.v.v v16, v8
-; ZVFHMIN-NEXT: vmv4r.v v12, v4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT: addi sp, sp, 16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT: ret
- %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
- %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
- %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
- %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
- ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_commute:
-; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16, v0.t
-; ZVFH-NEXT: ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_commute:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: addi sp, sp, -16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 5
-; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
-; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: lui a2, 8
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vmv.v.x v24, a1
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: mv a4, a1
-; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: add a1, a1, a4
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v8, v8, a2, v0.t
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a2, v0.t
-; ZVFHMIN-NEXT: slli a1, a3, 1
-; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a2, a0, a1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a3
-; ZVFHMIN-NEXT: sltu a3, a0, a2
-; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a2, a3, a2
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a3, a3, 3
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 4
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16, v0.t
-; ZVFHMIN-NEXT: bltu a0, a1, .LBB291_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a0, a1
-; ZVFHMIN-NEXT: .LBB291_2:
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16, v0.t
-; ZVFHMIN-NEXT: vmv.v.v v16, v8
-; ZVFHMIN-NEXT: vmv4r.v v12, v4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT: addi sp, sp, 16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT: ret
- %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
- %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
- %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
- %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
- ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_unmasked:
-; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16
-; ZVFH-NEXT: ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_unmasked:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: addi sp, sp, -16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: mv a2, a1
-; ZVFHMIN-NEXT: slli a1, a1, 2
-; ZVFHMIN-NEXT: add a1, a1, a2
-; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; ZVFHMIN-NEXT: fmv.x.h a2, fa0
-; ZVFHMIN-NEXT: lui a1, 8
-; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v24, v8, a1
-; ZVFHMIN-NEXT: vxor.vx v8, v16, a1
-; ZVFHMIN-NEXT: slli a1, a3, 1
-; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a4, a0, a1
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a3
-; ZVFHMIN-NEXT: sltu a3, a0, a4
-; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a3, a3, a4
-; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: slli a4, a4, 3
-; ZVFHMIN-NEXT: mv a5, a4
-; ZVFHMIN-NEXT: slli a4, a4, 1
-; ZVFHMIN-NEXT: add a4, a4, a5
-; ZVFHMIN-NEXT: add a4, sp, a4
-; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: slli a4, a4, 4
-; ZVFHMIN-NEXT: add a4, sp, a4
-; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: slli a4, a4, 5
-; ZVFHMIN-NEXT: add a4, sp, a4
-; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t
-; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: slli a4, a4, 3
-; ZVFHMIN-NEXT: add a4, sp, a4
-; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vmv.v.x v8, a2
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 4
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT: bltu a0, a1, .LBB292_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a0, a1
-; ZVFHMIN-NEXT: .LBB292_2:
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: mv a2, a1
-; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: add a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v16
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 2
-; ZVFHMIN-NEXT: add a0, a0, a1
-; ZVFHMIN-NEXT: add sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT: addi sp, sp, 16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT: ret
- %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
- %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
- %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- ret <vscale x 32 x half> %v
-}
-
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute:
-; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16
-; ZVFH-NEXT: ret
-;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: addi sp, sp, -16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 5
-; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: fmv.x.h a2, fa0
-; ZVFHMIN-NEXT: lui a1, 8
-; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v24
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a1
-; ZVFHMIN-NEXT: slli a1, a3, 1
-; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a4, a0, a1
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a3
-; ZVFHMIN-NEXT: sltu a3, a0, a4
-; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a3, a3, a4
-; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: slli a4, a4, 4
-; ZVFHMIN-NEXT: add a4, sp, a4
-; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
-; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: slli a4, a4, 3
-; ZVFHMIN-NEXT: add a4, sp, a4
-; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: slli a4, a4, 3
-; ZVFHMIN-NEXT: mv a5, a4
-; ZVFHMIN-NEXT: slli a4, a4, 1
-; ZVFHMIN-NEXT: add a4, a4, a5
-; ZVFHMIN-NEXT: add a4, sp, a4
-; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vmv.v.x v8, a2
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16, v0.t
-; ZVFHMIN-NEXT: bltu a0, a1, .LBB293_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a0, a1
-; ZVFHMIN-NEXT: .LBB293_2:
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 4
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
-; ZVFHMIN-NEXT: addi sp, sp, 16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
-; ZVFHMIN-NEXT: ret
- %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
- %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
- %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+; ZVFHMIN32-LABEL: vfnmadd_vv_nxv32f16:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: addi sp, sp, -16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 5
+; ZVFHMIN32-NEXT: sub sp, sp, a2
+; ZVFHMIN32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vmv1r.v v3, v0
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: lui a2, 8
+; ZVFHMIN32-NEXT: csrr a3, vlenb
+; ZVFHMIN32-NEXT: vxor.vx v8, v16, a2, v0.t
+; ZVFHMIN32-NEXT: slli a0, a3, 1
+; ZVFHMIN32-NEXT: srli a3, a3, 2
+; ZVFHMIN32-NEXT: sub a4, a1, a0
+; ZVFHMIN32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslidedown.vx v6, v0, a3
+; ZVFHMIN32-NEXT: sltu a3, a1, a4
+; ZVFHMIN32-NEXT: csrr a5, vlenb
+; ZVFHMIN32-NEXT: slli a5, a5, 4
+; ZVFHMIN32-NEXT: add a5, sp, a5
+; ZVFHMIN32-NEXT: addi a5, a5, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v24, v24, a2, v0.t
+; ZVFHMIN32-NEXT: addi a3, a3, -1
+; ZVFHMIN32-NEXT: and a3, a3, a4
+; ZVFHMIN32-NEXT: vmv1r.v v0, v6
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v28, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 4
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v4, v24, v0.t
+; ZVFHMIN32-NEXT: bltu a1, a0, .LBB286_2
+; ZVFHMIN32-NEXT: # %bb.1:
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: .LBB286_2:
+; ZVFHMIN32-NEXT: vmv1r.v v0, v3
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, a0, a1
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT: vmv4r.v v12, v4
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 5
+; ZVFHMIN32-NEXT: add sp, sp, a0
+; ZVFHMIN32-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT: addi sp, sp, 16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT: ret
+;
+; ZVFHMIN64-LABEL: vfnmadd_vv_nxv32f16:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: addi sp, sp, -16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 5
+; ZVFHMIN64-NEXT: sub sp, sp, a2
+; ZVFHMIN64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vmv1r.v v3, v0
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN64-NEXT: lui a2, 8
+; ZVFHMIN64-NEXT: csrr a3, vlenb
+; ZVFHMIN64-NEXT: vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN64-NEXT: slli a0, a3, 1
+; ZVFHMIN64-NEXT: srli a3, a3, 2
+; ZVFHMIN64-NEXT: sub a4, a1, a0
+; ZVFHMIN64-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslidedown.vx v6, v0, a3
+; ZVFHMIN64-NEXT: sltu a3, a1, a4
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN64-NEXT: addi a3, a3, -1
+; ZVFHMIN64-NEXT: and a3, a3, a4
+; ZVFHMIN64-NEXT: vmv1r.v v0, v6
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 4
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v4, v24, v0.t
+; ZVFHMIN64-NEXT: bltu a1, a0, .LBB286_2
+; ZVFHMIN64-NEXT: # %bb.1:
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: .LBB286_2:
+; ZVFHMIN64-NEXT: vmv1r.v v0, v3
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, a0, a1
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT: vmv.v.v v16, v8
+; ZVFHMIN64-NEXT: vmv4r.v v12, v4
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 5
+; ZVFHMIN64-NEXT: add sp, sp, a0
+; ZVFHMIN64-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT: addi sp, sp, 16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT: ret
+ %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+ %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x half> %v
}
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat:
+define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vv_nxv32f16_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vl8re16.v v24, (a0)
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v16, v24, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN32-LABEL: vfnmadd_vv_nxv32f16_commuted:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: addi sp, sp, -16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 5
+; ZVFHMIN32-NEXT: sub sp, sp, a2
+; ZVFHMIN32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vmv1r.v v3, v0
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN32-NEXT: lui a2, 8
+; ZVFHMIN32-NEXT: csrr a3, vlenb
+; ZVFHMIN32-NEXT: vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN32-NEXT: slli a0, a3, 1
+; ZVFHMIN32-NEXT: srli a3, a3, 2
+; ZVFHMIN32-NEXT: sub a4, a1, a0
+; ZVFHMIN32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslidedown.vx v6, v0, a3
+; ZVFHMIN32-NEXT: sltu a3, a1, a4
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN32-NEXT: addi a3, a3, -1
+; ZVFHMIN32-NEXT: and a3, a3, a4
+; ZVFHMIN32-NEXT: vmv1r.v v0, v6
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 4
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v4, v16, v0.t
+; ZVFHMIN32-NEXT: bltu a1, a0, .LBB287_2
+; ZVFHMIN32-NEXT: # %bb.1:
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: .LBB287_2:
+; ZVFHMIN32-NEXT: vmv1r.v v0, v3
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, a0, a1
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v8, v16, v24, v0.t
+; ZVFHMIN32-NEXT: vmv.v.v v16, v8
+; ZVFHMIN32-NEXT: vmv4r.v v12, v4
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 5
+; ZVFHMIN32-NEXT: add sp, sp, a0
+; ZVFHMIN32-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT: addi sp, sp, 16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT: ret
+;
+; ZVFHMIN64-LABEL: vfnmadd_vv_nxv32f16_commuted:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: addi sp, sp, -16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 5
+; ZVFHMIN64-NEXT: sub sp, sp, a2
+; ZVFHMIN64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vmv1r.v v3, v0
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN64-NEXT: lui a2, 8
+; ZVFHMIN64-NEXT: csrr a3, vlenb
+; ZVFHMIN64-NEXT: vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN64-NEXT: slli a0, a3, 1
+; ZVFHMIN64-NEXT: srli a3, a3, 2
+; ZVFHMIN64-NEXT: sub a4, a1, a0
+; ZVFHMIN64-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslidedown.vx v6, v0, a3
+; ZVFHMIN64-NEXT: sltu a3, a1, a4
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN64-NEXT: addi a3, a3, -1
+; ZVFHMIN64-NEXT: and a3, a3, a4
+; ZVFHMIN64-NEXT: vmv1r.v v0, v6
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 4
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v4, v8, v0.t
+; ZVFHMIN64-NEXT: bltu a1, a0, .LBB287_2
+; ZVFHMIN64-NEXT: # %bb.1:
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: .LBB287_2:
+; ZVFHMIN64-NEXT: vmv1r.v v0, v3
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, a0, a1
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT: vmv.v.v v16, v8
+; ZVFHMIN64-NEXT: vmv4r.v v12, v4
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 5
+; ZVFHMIN64-NEXT: add sp, sp, a0
+; ZVFHMIN64-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT: addi sp, sp, 16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT: ret
+ %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+ %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vv_nxv32f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vl8re16.v v24, (a0)
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN32-LABEL: vfnmadd_vv_nxv32f16_unmasked:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: addi sp, sp, -16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 5
+; ZVFHMIN32-NEXT: sub sp, sp, a2
+; ZVFHMIN32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN32-NEXT: lui a2, 8
+; ZVFHMIN32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN32-NEXT: vmset.m v8
+; ZVFHMIN32-NEXT: csrr a3, vlenb
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v16, v16, a2
+; ZVFHMIN32-NEXT: slli a0, a3, 1
+; ZVFHMIN32-NEXT: srli a3, a3, 2
+; ZVFHMIN32-NEXT: sub a4, a1, a0
+; ZVFHMIN32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslidedown.vx v0, v8, a3
+; ZVFHMIN32-NEXT: sltu a3, a1, a4
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v8, v24, a2
+; ZVFHMIN32-NEXT: addi a3, a3, -1
+; ZVFHMIN32-NEXT: and a3, a3, a4
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 4
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN32-NEXT: bltu a1, a0, .LBB288_2
+; ZVFHMIN32-NEXT: # %bb.1:
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: .LBB288_2:
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, a0, a1
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v16, v24, v0
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 5
+; ZVFHMIN32-NEXT: add sp, sp, a0
+; ZVFHMIN32-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT: addi sp, sp, 16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT: ret
+;
+; ZVFHMIN64-LABEL: vfnmadd_vv_nxv32f16_unmasked:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: addi sp, sp, -16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 5
+; ZVFHMIN64-NEXT: sub sp, sp, a2
+; ZVFHMIN64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN64-NEXT: lui a2, 8
+; ZVFHMIN64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN64-NEXT: vmset.m v8
+; ZVFHMIN64-NEXT: csrr a3, vlenb
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v16, v16, a2
+; ZVFHMIN64-NEXT: slli a0, a3, 1
+; ZVFHMIN64-NEXT: srli a3, a3, 2
+; ZVFHMIN64-NEXT: sub a4, a1, a0
+; ZVFHMIN64-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslidedown.vx v0, v8, a3
+; ZVFHMIN64-NEXT: sltu a3, a1, a4
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v8, v24, a2
+; ZVFHMIN64-NEXT: addi a3, a3, -1
+; ZVFHMIN64-NEXT: and a3, a3, a4
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 4
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN64-NEXT: bltu a1, a0, .LBB288_2
+; ZVFHMIN64-NEXT: # %bb.1:
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: .LBB288_2:
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, a0, a1
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v0, v24
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 5
+; ZVFHMIN64-NEXT: add sp, sp, a0
+; ZVFHMIN64-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT: addi sp, sp, 16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT: ret
+ %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vl8re16.v v24, (a0)
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN32-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: addi sp, sp, -16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 5
+; ZVFHMIN32-NEXT: sub sp, sp, a2
+; ZVFHMIN32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN32-NEXT: lui a2, 8
+; ZVFHMIN32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN32-NEXT: vmset.m v8
+; ZVFHMIN32-NEXT: csrr a3, vlenb
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v16, v16, a2
+; ZVFHMIN32-NEXT: slli a0, a3, 1
+; ZVFHMIN32-NEXT: srli a3, a3, 2
+; ZVFHMIN32-NEXT: sub a4, a1, a0
+; ZVFHMIN32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslidedown.vx v0, v8, a3
+; ZVFHMIN32-NEXT: sltu a3, a1, a4
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v8, v24, a2
+; ZVFHMIN32-NEXT: addi a3, a3, -1
+; ZVFHMIN32-NEXT: and a3, a3, a4
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 4
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFHMIN32-NEXT: bltu a1, a0, .LBB289_2
+; ZVFHMIN32-NEXT: # %bb.1:
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: .LBB289_2:
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, a0, a1
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 5
+; ZVFHMIN32-NEXT: add sp, sp, a0
+; ZVFHMIN32-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT: addi sp, sp, 16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT: ret
+;
+; ZVFHMIN64-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: addi sp, sp, -16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 5
+; ZVFHMIN64-NEXT: sub sp, sp, a2
+; ZVFHMIN64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN64-NEXT: lui a2, 8
+; ZVFHMIN64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN64-NEXT: vmset.m v7
+; ZVFHMIN64-NEXT: csrr a3, vlenb
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v8, v16, a2
+; ZVFHMIN64-NEXT: slli a0, a3, 1
+; ZVFHMIN64-NEXT: srli a3, a3, 2
+; ZVFHMIN64-NEXT: sub a4, a1, a0
+; ZVFHMIN64-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslidedown.vx v0, v7, a3
+; ZVFHMIN64-NEXT: sltu a3, a1, a4
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v16, v24, a2
+; ZVFHMIN64-NEXT: addi a3, a3, -1
+; ZVFHMIN64-NEXT: and a3, a3, a4
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 4
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v24, v16, v8, v0.t
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN64-NEXT: bltu a1, a0, .LBB289_2
+; ZVFHMIN64-NEXT: # %bb.1:
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: .LBB289_2:
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, a0, a1
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v0, v24
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 5
+; ZVFHMIN64-NEXT: add sp, sp, a0
+; ZVFHMIN64-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT: addi sp, sp, 16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT: ret
+ %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16, v0.t
; ZVFH-NEXT: ret
;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
@@ -9818,24 +9752,25 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: lui a2, 8
+; ZVFHMIN-NEXT: csrr a3, vlenb
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: mv a2, a1
+; ZVFHMIN-NEXT: mv a4, a1
; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: add a1, a1, a2
+; ZVFHMIN-NEXT: add a1, a1, a4
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: lui a2, 8
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN-NEXT: vxor.vx v16, v16, a2, v0.t
; ZVFHMIN-NEXT: slli a1, a3, 1
; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: vxor.vx v8, v24, a2, v0.t
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a2, v0.t
; ZVFHMIN-NEXT: sub a2, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a3
@@ -9856,7 +9791,7 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: mv a3, a2
@@ -9864,34 +9799,39 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t
+; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16, v0.t
-; ZVFHMIN-NEXT: bltu a0, a1, .LBB294_2
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB290_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
-; ZVFHMIN-NEXT: .LBB294_2:
+; ZVFHMIN-NEXT: .LBB290_2:
; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: mv a1, a0
@@ -9900,16 +9840,11 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
@@ -9928,20 +9863,20 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half>
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
- %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
%negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x half> %v
}
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_commute:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16, v0.t
; ZVFH-NEXT: ret
;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_commute:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
@@ -9949,24 +9884,25 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: lui a2, 8
+; ZVFHMIN-NEXT: csrr a3, vlenb
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: mv a2, a1
+; ZVFHMIN-NEXT: mv a4, a1
; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: add a1, a1, a2
+; ZVFHMIN-NEXT: add a1, a1, a4
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: lui a2, 8
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN-NEXT: vxor.vx v16, v16, a2, v0.t
; ZVFHMIN-NEXT: slli a1, a3, 1
; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: vxor.vx v8, v24, a2, v0.t
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a2, v0.t
; ZVFHMIN-NEXT: sub a2, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a3
@@ -10000,34 +9936,29 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t
+; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8, v0.t
-; ZVFHMIN-NEXT: bltu a0, a1, .LBB295_2
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB291_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
-; ZVFHMIN-NEXT: .LBB295_2:
+; ZVFHMIN-NEXT: .LBB291_2:
; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: mv a1, a0
@@ -10035,17 +9966,22 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32
; ZVFHMIN-NEXT: add a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24, v0.t
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t
+; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16, v0.t
; ZVFHMIN-NEXT: vmv.v.v v16, v8
; ZVFHMIN-NEXT: vmv4r.v v12, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
@@ -10059,118 +9995,134 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
- %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
%negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x half> %v
}
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_unmasked:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16
; ZVFH-NEXT: ret
;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 5
-; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: mv a2, a1
-; ZVFHMIN-NEXT: slli a1, a1, 1
+; ZVFHMIN-NEXT: slli a1, a1, 2
; ZVFHMIN-NEXT: add a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: lui a2, 8
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; ZVFHMIN-NEXT: fmv.x.h a2, fa0
+; ZVFHMIN-NEXT: lui a1, 8
; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v7
; ZVFHMIN-NEXT: csrr a3, vlenb
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vmv.v.x v24, a1
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a2
+; ZVFHMIN-NEXT: vxor.vx v24, v8, a1
+; ZVFHMIN-NEXT: vxor.vx v8, v16, a1
; ZVFHMIN-NEXT: slli a1, a3, 1
; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: vxor.vx v8, v24, a2
-; ZVFHMIN-NEXT: sub a2, a0, a1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: sub a4, a0, a1
+; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a3
-; ZVFHMIN-NEXT: sltu a3, a0, a2
+; ZVFHMIN-NEXT: sltu a3, a0, a4
; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a2, a3, a2
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a3, a3, 4
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: and a3, a3, a4
+; ZVFHMIN-NEXT: csrr a4, vlenb
+; ZVFHMIN-NEXT: slli a4, a4, 3
+; ZVFHMIN-NEXT: mv a5, a4
+; ZVFHMIN-NEXT: slli a4, a4, 1
+; ZVFHMIN-NEXT: add a4, a4, a5
+; ZVFHMIN-NEXT: add a4, sp, a4
+; ZVFHMIN-NEXT: addi a4, a4, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: csrr a4, vlenb
+; ZVFHMIN-NEXT: slli a4, a4, 4
+; ZVFHMIN-NEXT: add a4, sp, a4
+; ZVFHMIN-NEXT: addi a4, a4, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: csrr a4, vlenb
+; ZVFHMIN-NEXT: slli a4, a4, 5
+; ZVFHMIN-NEXT: add a4, sp, a4
+; ZVFHMIN-NEXT: addi a4, a4, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t
+; ZVFHMIN-NEXT: csrr a4, vlenb
+; ZVFHMIN-NEXT: slli a4, a4, 3
+; ZVFHMIN-NEXT: add a4, sp, a4
+; ZVFHMIN-NEXT: addi a4, a4, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v8, a2
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT: bltu a0, a1, .LBB296_2
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB292_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
-; ZVFHMIN-NEXT: .LBB296_2:
+; ZVFHMIN-NEXT: .LBB292_2:
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 4
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: mv a2, a1
+; ZVFHMIN-NEXT: slli a1, a1, 1
+; ZVFHMIN-NEXT: add a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16
+; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: mv a1, a0
+; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: add a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
@@ -10178,20 +10130,20 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
- %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
%negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x half> %v
}
-define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16
; ZVFH-NEXT: ret
;
-; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
@@ -10199,80 +10151,74 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: mv a2, a1
-; ZVFHMIN-NEXT: slli a1, a1, 1
-; ZVFHMIN-NEXT: add a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: lui a2, 8
+; ZVFHMIN-NEXT: fmv.x.h a2, fa0
+; ZVFHMIN-NEXT: lui a1, 8
; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v7
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: csrr a3, vlenb
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vmv.v.x v24, a1
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a2
+; ZVFHMIN-NEXT: vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT: vxor.vx v16, v16, a1
; ZVFHMIN-NEXT: slli a1, a3, 1
; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: vxor.vx v8, v24, a2
-; ZVFHMIN-NEXT: sub a2, a0, a1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a3
-; ZVFHMIN-NEXT: sltu a3, a0, a2
+; ZVFHMIN-NEXT: sub a4, a0, a1
+; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a3
+; ZVFHMIN-NEXT: sltu a3, a0, a4
; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a2, a3, a2
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a3, a3, 4
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: and a3, a3, a4
+; ZVFHMIN-NEXT: csrr a4, vlenb
+; ZVFHMIN-NEXT: slli a4, a4, 4
+; ZVFHMIN-NEXT: add a4, sp, a4
+; ZVFHMIN-NEXT: addi a4, a4, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: csrr a4, vlenb
+; ZVFHMIN-NEXT: slli a4, a4, 3
+; ZVFHMIN-NEXT: add a4, sp, a4
+; ZVFHMIN-NEXT: addi a4, a4, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: csrr a4, vlenb
+; ZVFHMIN-NEXT: slli a4, a4, 3
+; ZVFHMIN-NEXT: mv a5, a4
+; ZVFHMIN-NEXT: slli a4, a4, 1
+; ZVFHMIN-NEXT: add a4, a4, a5
+; ZVFHMIN-NEXT: add a4, sp, a4
+; ZVFHMIN-NEXT: addi a4, a4, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v8, a2
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8, v0.t
-; ZVFHMIN-NEXT: bltu a0, a1, .LBB297_2
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB293_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
-; ZVFHMIN-NEXT: .LBB297_2:
+; ZVFHMIN-NEXT: .LBB293_2:
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: mv a1, a0
@@ -10280,15 +10226,20 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
; ZVFHMIN-NEXT: add a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8
+; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -10298,79 +10249,66 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
- %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
%negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x half> %v
}
-define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmsub_vv_nxv32f16:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vl8re16.v v24, (a0)
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vv v16, v8, v24, v0.t
-; ZVFH-NEXT: vmv.v.v v8, v16
+; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16, v0.t
; ZVFH-NEXT: ret
-;
-; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: addi sp, sp, -16
-; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 5
-; ZVFHMIN-NEXT: sub sp, sp, a2
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+;
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 5
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: mv a2, a1
+; ZVFHMIN-NEXT: slli a1, a1, 1
+; ZVFHMIN-NEXT: add a1, a1, a2
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: lui a2, 8
; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vxor.vx v8, v16, a2, v0.t
-; ZVFHMIN-NEXT: slli a0, a3, 1
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: slli a1, a3, 1
; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a4, a1, a0
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
-; ZVFHMIN-NEXT: sltu a3, a1, a4
-; ZVFHMIN-NEXT: csrr a5, vlenb
-; ZVFHMIN-NEXT: slli a5, a5, 4
-; ZVFHMIN-NEXT: add a5, sp, a5
-; ZVFHMIN-NEXT: addi a5, a5, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v24, v24, a2, v0.t
+; ZVFHMIN-NEXT: vxor.vx v8, v24, a2, v0.t
+; ZVFHMIN-NEXT: vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN-NEXT: sub a2, a0, a1
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a3
+; ZVFHMIN-NEXT: sltu a3, a0, a2
; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a3, a3, a4
-; ZVFHMIN-NEXT: vmv1r.v v0, v6
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28, v0.t
+; ZVFHMIN-NEXT: and a2, a3, a2
+; ZVFHMIN-NEXT: csrr a3, vlenb
+; ZVFHMIN-NEXT: slli a3, a3, 3
+; ZVFHMIN-NEXT: add a3, sp, a3
+; ZVFHMIN-NEXT: addi a3, a3, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: mv a3, a2
@@ -10378,30 +10316,39 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24, v0.t
-; ZVFHMIN-NEXT: bltu a1, a0, .LBB298_2
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB294_2
; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: .LBB298_2:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB294_2:
; ZVFHMIN-NEXT: vmv1r.v v0, v3
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: mv a1, a0
+; ZVFHMIN-NEXT: slli a0, a0, 1
+; ZVFHMIN-NEXT: add a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
@@ -10411,27 +10358,19 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: slli a0, a0, 1
-; ZVFHMIN-NEXT: add a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN-NEXT: vmv.v.v v16, v8
; ZVFHMIN-NEXT: vmv4r.v v12, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -10439,68 +10378,68 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
- %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
- %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+ %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+ %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+ %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x half> %v
}
-define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmsub_vv_nxv32f16_commuted:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vl8re16.v v24, (a0)
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vv v8, v16, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16, v0.t
; ZVFH-NEXT: ret
;
-; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16_commuted:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 5
-; ZVFHMIN-NEXT: sub sp, sp, a2
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 5
+; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v3, v0
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: mv a2, a1
+; ZVFHMIN-NEXT: slli a1, a1, 1
+; ZVFHMIN-NEXT: add a1, a1, a2
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: lui a2, 8
; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vxor.vx v16, v16, a2, v0.t
-; ZVFHMIN-NEXT: slli a0, a3, 1
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: slli a1, a3, 1
; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a4, a1, a0
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
-; ZVFHMIN-NEXT: sltu a3, a1, a4
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFHMIN-NEXT: vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN-NEXT: vxor.vx v8, v24, a2, v0.t
+; ZVFHMIN-NEXT: vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN-NEXT: sub a2, a0, a1
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a3
+; ZVFHMIN-NEXT: sltu a3, a0, a2
; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a3, a3, a4
-; ZVFHMIN-NEXT: vmv1r.v v0, v6
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN-NEXT: and a2, a3, a2
+; ZVFHMIN-NEXT: csrr a3, vlenb
+; ZVFHMIN-NEXT: slli a3, a3, 3
+; ZVFHMIN-NEXT: add a3, sp, a3
+; ZVFHMIN-NEXT: addi a3, a3, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: mv a3, a2
@@ -10508,38 +10447,38 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16, v0.t
-; ZVFHMIN-NEXT: bltu a1, a0, .LBB299_2
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB295_2
; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: .LBB299_2:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB295_2:
; ZVFHMIN-NEXT: vmv1r.v v0, v3
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
@@ -10548,15 +10487,15 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %
; ZVFHMIN-NEXT: add a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t
; ZVFHMIN-NEXT: vmv.v.v v16, v8
@@ -10570,68 +10509,69 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
- %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
- %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+ %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+ %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+ %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x half> %v
}
-define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmsub_vv_nxv32f16_unmasked:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vl8re16.v v24, (a0)
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16
; ZVFH-NEXT: ret
;
-; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16_unmasked:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 5
-; ZVFHMIN-NEXT: sub sp, sp, a2
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 5
+; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: mv a2, a1
+; ZVFHMIN-NEXT: slli a1, a1, 1
+; ZVFHMIN-NEXT: add a1, a1, a2
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: lui a2, 8
-; ZVFHMIN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v8
+; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT: vmset.m v7
; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
; ZVFHMIN-NEXT: vxor.vx v16, v16, a2
-; ZVFHMIN-NEXT: slli a0, a3, 1
+; ZVFHMIN-NEXT: slli a1, a3, 1
; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a4, a1, a0
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3
-; ZVFHMIN-NEXT: sltu a3, a1, a4
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; ZVFHMIN-NEXT: vxor.vx v8, v24, a2
+; ZVFHMIN-NEXT: sub a2, a0, a1
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a3
+; ZVFHMIN-NEXT: sltu a3, a0, a2
; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a3, a3, a4
+; ZVFHMIN-NEXT: and a2, a3, a2
+; ZVFHMIN-NEXT: csrr a3, vlenb
+; ZVFHMIN-NEXT: slli a3, a3, 4
+; ZVFHMIN-NEXT: add a3, sp, a3
+; ZVFHMIN-NEXT: addi a3, a3, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 4
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: mv a3, a2
@@ -10647,16 +10587,16 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT: bltu a1, a0, .LBB300_2
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB296_2
; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: .LBB300_2:
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB296_2:
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 4
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
@@ -10665,7 +10605,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: mv a1, a0
@@ -10673,14 +10613,14 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
; ZVFHMIN-NEXT: add a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v0
+; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -10688,68 +10628,69 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
- %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
- %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+ %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+ %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x half> %v
}
-define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
-; ZVFH-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted:
+define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vscale x 32 x half> %va, half %b, <vscale x 32 x half> %vc, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vl8re16.v v24, (a0)
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; ZVFH-NEXT: vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vf v8, fa0, v16
; ZVFH-NEXT: ret
;
-; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted:
+; ZVFHMIN-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 5
-; ZVFHMIN-NEXT: sub sp, sp, a2
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 5
+; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
-; ZVFHMIN-NEXT: mv a3, a2
-; ZVFHMIN-NEXT: slli a2, a2, 1
-; ZVFHMIN-NEXT: add a2, a2, a3
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: mv a2, a1
+; ZVFHMIN-NEXT: slli a1, a1, 1
+; ZVFHMIN-NEXT: add a1, a1, a2
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: lui a2, 8
-; ZVFHMIN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v8
+; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT: vmset.m v7
; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
; ZVFHMIN-NEXT: vxor.vx v16, v16, a2
-; ZVFHMIN-NEXT: slli a0, a3, 1
+; ZVFHMIN-NEXT: slli a1, a3, 1
; ZVFHMIN-NEXT: srli a3, a3, 2
-; ZVFHMIN-NEXT: sub a4, a1, a0
-; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3
-; ZVFHMIN-NEXT: sltu a3, a1, a4
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; ZVFHMIN-NEXT: vxor.vx v8, v24, a2
+; ZVFHMIN-NEXT: sub a2, a0, a1
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a3
+; ZVFHMIN-NEXT: sltu a3, a0, a2
; ZVFHMIN-NEXT: addi a3, a3, -1
-; ZVFHMIN-NEXT: and a3, a3, a4
+; ZVFHMIN-NEXT: and a2, a3, a2
+; ZVFHMIN-NEXT: csrr a3, vlenb
+; ZVFHMIN-NEXT: slli a3, a3, 4
+; ZVFHMIN-NEXT: add a3, sp, a3
+; ZVFHMIN-NEXT: addi a3, a3, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 4
-; ZVFHMIN-NEXT: add a2, sp, a2
-; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: mv a3, a2
@@ -10757,33 +10698,33 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
; ZVFHMIN-NEXT: add a2, a2, a3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16, v0.t
-; ZVFHMIN-NEXT: bltu a1, a0, .LBB301_2
+; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB297_2
; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: .LBB301_2:
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB297_2:
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 4
+; ZVFHMIN-NEXT: add a1, sp, a1
+; ZVFHMIN-NEXT: addi a1, a1, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: mv a1, a0
@@ -10791,14 +10732,15 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
; ZVFHMIN-NEXT: add a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
+; ZVFHMIN-NEXT: vmv8r.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -10806,6 +10748,968 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
+ %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
+ %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmsub_vv_nxv32f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vl8re16.v v24, (a0)
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v16, v8, v24, v0.t
+; ZVFH-NEXT: vmv.v.v v8, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN32-LABEL: vfnmsub_vv_nxv32f16:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: addi sp, sp, -16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 5
+; ZVFHMIN32-NEXT: sub sp, sp, a2
+; ZVFHMIN32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vmv1r.v v3, v0
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: lui a2, 8
+; ZVFHMIN32-NEXT: csrr a3, vlenb
+; ZVFHMIN32-NEXT: vxor.vx v8, v16, a2, v0.t
+; ZVFHMIN32-NEXT: slli a0, a3, 1
+; ZVFHMIN32-NEXT: srli a3, a3, 2
+; ZVFHMIN32-NEXT: sub a4, a1, a0
+; ZVFHMIN32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslidedown.vx v6, v0, a3
+; ZVFHMIN32-NEXT: sltu a3, a1, a4
+; ZVFHMIN32-NEXT: csrr a5, vlenb
+; ZVFHMIN32-NEXT: slli a5, a5, 4
+; ZVFHMIN32-NEXT: add a5, sp, a5
+; ZVFHMIN32-NEXT: addi a5, a5, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a5) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v24, v24, a2, v0.t
+; ZVFHMIN32-NEXT: addi a3, a3, -1
+; ZVFHMIN32-NEXT: and a3, a3, a4
+; ZVFHMIN32-NEXT: vmv1r.v v0, v6
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v28, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 4
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v4, v24, v0.t
+; ZVFHMIN32-NEXT: bltu a1, a0, .LBB298_2
+; ZVFHMIN32-NEXT: # %bb.1:
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: .LBB298_2:
+; ZVFHMIN32-NEXT: vmv1r.v v0, v3
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, a0, a1
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT: vmv4r.v v12, v4
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 5
+; ZVFHMIN32-NEXT: add sp, sp, a0
+; ZVFHMIN32-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT: addi sp, sp, 16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT: ret
+;
+; ZVFHMIN64-LABEL: vfnmsub_vv_nxv32f16:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: addi sp, sp, -16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 5
+; ZVFHMIN64-NEXT: sub sp, sp, a2
+; ZVFHMIN64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vmv1r.v v3, v0
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN64-NEXT: lui a2, 8
+; ZVFHMIN64-NEXT: csrr a3, vlenb
+; ZVFHMIN64-NEXT: vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN64-NEXT: slli a0, a3, 1
+; ZVFHMIN64-NEXT: srli a3, a3, 2
+; ZVFHMIN64-NEXT: sub a4, a1, a0
+; ZVFHMIN64-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslidedown.vx v6, v0, a3
+; ZVFHMIN64-NEXT: sltu a3, a1, a4
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN64-NEXT: addi a3, a3, -1
+; ZVFHMIN64-NEXT: and a3, a3, a4
+; ZVFHMIN64-NEXT: vmv1r.v v0, v6
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 4
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v4, v24, v0.t
+; ZVFHMIN64-NEXT: bltu a1, a0, .LBB298_2
+; ZVFHMIN64-NEXT: # %bb.1:
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: .LBB298_2:
+; ZVFHMIN64-NEXT: vmv1r.v v0, v3
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, a0, a1
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT: vmv.v.v v16, v8
+; ZVFHMIN64-NEXT: vmv4r.v v12, v4
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 5
+; ZVFHMIN64-NEXT: add sp, sp, a0
+; ZVFHMIN64-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT: addi sp, sp, 16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT: ret
+ %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+ %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmsub_vv_nxv32f16_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vl8re16.v v24, (a0)
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v16, v24, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN32-LABEL: vfnmsub_vv_nxv32f16_commuted:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: addi sp, sp, -16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 5
+; ZVFHMIN32-NEXT: sub sp, sp, a2
+; ZVFHMIN32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vmv1r.v v3, v0
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN32-NEXT: lui a2, 8
+; ZVFHMIN32-NEXT: csrr a3, vlenb
+; ZVFHMIN32-NEXT: vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN32-NEXT: slli a0, a3, 1
+; ZVFHMIN32-NEXT: srli a3, a3, 2
+; ZVFHMIN32-NEXT: sub a4, a1, a0
+; ZVFHMIN32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslidedown.vx v6, v0, a3
+; ZVFHMIN32-NEXT: sltu a3, a1, a4
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN32-NEXT: addi a3, a3, -1
+; ZVFHMIN32-NEXT: and a3, a3, a4
+; ZVFHMIN32-NEXT: vmv1r.v v0, v6
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 4
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v4, v16, v0.t
+; ZVFHMIN32-NEXT: bltu a1, a0, .LBB299_2
+; ZVFHMIN32-NEXT: # %bb.1:
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: .LBB299_2:
+; ZVFHMIN32-NEXT: vmv1r.v v0, v3
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, a0, a1
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v8, v16, v24, v0.t
+; ZVFHMIN32-NEXT: vmv.v.v v16, v8
+; ZVFHMIN32-NEXT: vmv4r.v v12, v4
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 5
+; ZVFHMIN32-NEXT: add sp, sp, a0
+; ZVFHMIN32-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT: addi sp, sp, 16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT: ret
+;
+; ZVFHMIN64-LABEL: vfnmsub_vv_nxv32f16_commuted:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: addi sp, sp, -16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 5
+; ZVFHMIN64-NEXT: sub sp, sp, a2
+; ZVFHMIN64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vmv1r.v v3, v0
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vl8re16.v v8, (a0)
+; ZVFHMIN64-NEXT: lui a2, 8
+; ZVFHMIN64-NEXT: csrr a3, vlenb
+; ZVFHMIN64-NEXT: vxor.vx v16, v16, a2, v0.t
+; ZVFHMIN64-NEXT: slli a0, a3, 1
+; ZVFHMIN64-NEXT: srli a3, a3, 2
+; ZVFHMIN64-NEXT: sub a4, a1, a0
+; ZVFHMIN64-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslidedown.vx v6, v0, a3
+; ZVFHMIN64-NEXT: sltu a3, a1, a4
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v8, v8, a2, v0.t
+; ZVFHMIN64-NEXT: addi a3, a3, -1
+; ZVFHMIN64-NEXT: and a3, a3, a4
+; ZVFHMIN64-NEXT: vmv1r.v v0, v6
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 4
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v4, v8, v0.t
+; ZVFHMIN64-NEXT: bltu a1, a0, .LBB299_2
+; ZVFHMIN64-NEXT: # %bb.1:
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: .LBB299_2:
+; ZVFHMIN64-NEXT: vmv1r.v v0, v3
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, a0, a1
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN64-NEXT: vmv.v.v v16, v8
+; ZVFHMIN64-NEXT: vmv4r.v v12, v4
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 5
+; ZVFHMIN64-NEXT: add sp, sp, a0
+; ZVFHMIN64-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT: addi sp, sp, 16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT: ret
+ %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+ %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmsub_vv_nxv32f16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vl8re16.v v24, (a0)
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN32-LABEL: vfnmsub_vv_nxv32f16_unmasked:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: addi sp, sp, -16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 5
+; ZVFHMIN32-NEXT: sub sp, sp, a2
+; ZVFHMIN32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN32-NEXT: lui a2, 8
+; ZVFHMIN32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN32-NEXT: vmset.m v8
+; ZVFHMIN32-NEXT: csrr a3, vlenb
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v16, v16, a2
+; ZVFHMIN32-NEXT: slli a0, a3, 1
+; ZVFHMIN32-NEXT: srli a3, a3, 2
+; ZVFHMIN32-NEXT: sub a4, a1, a0
+; ZVFHMIN32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslidedown.vx v0, v8, a3
+; ZVFHMIN32-NEXT: sltu a3, a1, a4
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v8, v24, a2
+; ZVFHMIN32-NEXT: addi a3, a3, -1
+; ZVFHMIN32-NEXT: and a3, a3, a4
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 4
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN32-NEXT: bltu a1, a0, .LBB300_2
+; ZVFHMIN32-NEXT: # %bb.1:
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: .LBB300_2:
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, a0, a1
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v16, v24, v0
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 5
+; ZVFHMIN32-NEXT: add sp, sp, a0
+; ZVFHMIN32-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT: addi sp, sp, 16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT: ret
+;
+; ZVFHMIN64-LABEL: vfnmsub_vv_nxv32f16_unmasked:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: addi sp, sp, -16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 5
+; ZVFHMIN64-NEXT: sub sp, sp, a2
+; ZVFHMIN64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN64-NEXT: lui a2, 8
+; ZVFHMIN64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN64-NEXT: vmset.m v8
+; ZVFHMIN64-NEXT: csrr a3, vlenb
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v16, v16, a2
+; ZVFHMIN64-NEXT: slli a0, a3, 1
+; ZVFHMIN64-NEXT: srli a3, a3, 2
+; ZVFHMIN64-NEXT: sub a4, a1, a0
+; ZVFHMIN64-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslidedown.vx v0, v8, a3
+; ZVFHMIN64-NEXT: sltu a3, a1, a4
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v8, v24, a2
+; ZVFHMIN64-NEXT: addi a3, a3, -1
+; ZVFHMIN64-NEXT: and a3, a3, a4
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 4
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN64-NEXT: bltu a1, a0, .LBB300_2
+; ZVFHMIN64-NEXT: # %bb.1:
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: .LBB300_2:
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, a0, a1
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v0, v24
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 5
+; ZVFHMIN64-NEXT: add sp, sp, a0
+; ZVFHMIN64-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT: addi sp, sp, 16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT: ret
+ %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ ret <vscale x 32 x half> %v
+}
+
+define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, i32 zeroext %evl) {
+; ZVFH-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vl8re16.v v24, (a0)
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFH-NEXT: vfnmadd.vv v8, v16, v24
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN32-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted:
+; ZVFHMIN32: # %bb.0:
+; ZVFHMIN32-NEXT: addi sp, sp, -16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 5
+; ZVFHMIN32-NEXT: sub sp, sp, a2
+; ZVFHMIN32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN32-NEXT: lui a2, 8
+; ZVFHMIN32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN32-NEXT: vmset.m v8
+; ZVFHMIN32-NEXT: csrr a3, vlenb
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v16, v16, a2
+; ZVFHMIN32-NEXT: slli a0, a3, 1
+; ZVFHMIN32-NEXT: srli a3, a3, 2
+; ZVFHMIN32-NEXT: sub a4, a1, a0
+; ZVFHMIN32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN32-NEXT: vslidedown.vx v0, v8, a3
+; ZVFHMIN32-NEXT: sltu a3, a1, a4
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN32-NEXT: vxor.vx v8, v24, a2
+; ZVFHMIN32-NEXT: addi a3, a3, -1
+; ZVFHMIN32-NEXT: and a3, a3, a4
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 4
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN32-NEXT: csrr a2, vlenb
+; ZVFHMIN32-NEXT: slli a2, a2, 3
+; ZVFHMIN32-NEXT: mv a3, a2
+; ZVFHMIN32-NEXT: slli a2, a2, 1
+; ZVFHMIN32-NEXT: add a2, a2, a3
+; ZVFHMIN32-NEXT: add a2, sp, a2
+; ZVFHMIN32-NEXT: addi a2, a2, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN32-NEXT: addi a2, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v16, v24, v8, v0.t
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFHMIN32-NEXT: bltu a1, a0, .LBB301_2
+; ZVFHMIN32-NEXT: # %bb.1:
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: .LBB301_2:
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 4
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 3
+; ZVFHMIN32-NEXT: mv a1, a0
+; ZVFHMIN32-NEXT: slli a0, a0, 1
+; ZVFHMIN32-NEXT: add a0, a0, a1
+; ZVFHMIN32-NEXT: add a0, sp, a0
+; ZVFHMIN32-NEXT: addi a0, a0, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN32-NEXT: addi a0, sp, 16
+; ZVFHMIN32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN32-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN32-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN32-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN32-NEXT: csrr a0, vlenb
+; ZVFHMIN32-NEXT: slli a0, a0, 5
+; ZVFHMIN32-NEXT: add sp, sp, a0
+; ZVFHMIN32-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN32-NEXT: addi sp, sp, 16
+; ZVFHMIN32-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN32-NEXT: ret
+;
+; ZVFHMIN64-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted:
+; ZVFHMIN64: # %bb.0:
+; ZVFHMIN64-NEXT: addi sp, sp, -16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 5
+; ZVFHMIN64-NEXT: sub sp, sp, a2
+; ZVFHMIN64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN64-NEXT: lui a2, 8
+; ZVFHMIN64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; ZVFHMIN64-NEXT: vmset.m v7
+; ZVFHMIN64-NEXT: csrr a3, vlenb
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v8, v16, a2
+; ZVFHMIN64-NEXT: slli a0, a3, 1
+; ZVFHMIN64-NEXT: srli a3, a3, 2
+; ZVFHMIN64-NEXT: sub a4, a1, a0
+; ZVFHMIN64-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
+; ZVFHMIN64-NEXT: vslidedown.vx v0, v7, a3
+; ZVFHMIN64-NEXT: sltu a3, a1, a4
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; ZVFHMIN64-NEXT: vxor.vx v16, v24, a2
+; ZVFHMIN64-NEXT: addi a3, a3, -1
+; ZVFHMIN64-NEXT: and a3, a3, a4
+; ZVFHMIN64-NEXT: addi a2, sp, 16
+; ZVFHMIN64-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vsetvli zero, a3, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 4
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: mv a3, a2
+; ZVFHMIN64-NEXT: slli a2, a2, 1
+; ZVFHMIN64-NEXT: add a2, a2, a3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFHMIN64-NEXT: csrr a2, vlenb
+; ZVFHMIN64-NEXT: slli a2, a2, 3
+; ZVFHMIN64-NEXT: add a2, sp, a2
+; ZVFHMIN64-NEXT: addi a2, a2, 16
+; ZVFHMIN64-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v24, v16, v8, v0.t
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v12, v24, v0.t
+; ZVFHMIN64-NEXT: bltu a1, a0, .LBB301_2
+; ZVFHMIN64-NEXT: # %bb.1:
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: .LBB301_2:
+; ZVFHMIN64-NEXT: addi a0, sp, 16
+; ZVFHMIN64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 4
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: mv a1, a0
+; ZVFHMIN64-NEXT: slli a0, a0, 1
+; ZVFHMIN64-NEXT: add a0, a0, a1
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vfwcvt.f.f.v v0, v24
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 3
+; ZVFHMIN64-NEXT: add a0, sp, a0
+; ZVFHMIN64-NEXT: addi a0, a0, 16
+; ZVFHMIN64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN64-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN64-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN64-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN64-NEXT: csrr a0, vlenb
+; ZVFHMIN64-NEXT: slli a0, a0, 5
+; ZVFHMIN64-NEXT: add sp, sp, a0
+; ZVFHMIN64-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN64-NEXT: addi sp, sp, 16
+; ZVFHMIN64-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN64-NEXT: ret
%negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
%negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
%v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
index b83ddce61f44d..1f53b29b726aa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
@@ -475,9 +475,10 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64(<vscale x 16 x i64> %va, <vs
; CHECK-NEXT: addi a7, a7, -1
; CHECK-NEXT: add a5, a0, a1
; CHECK-NEXT: mv a6, a2
+; CHECK-NEXT: mv t0, a2
; CHECK-NEXT: bltu a2, a7, .LBB22_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a6, a7
+; CHECK-NEXT: mv t0, a7
; CHECK-NEXT: .LBB22_2:
; CHECK-NEXT: addi sp, sp, -80
; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
@@ -488,31 +489,30 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64(<vscale x 16 x i64> %va, <vs
; CHECK-NEXT: sub sp, sp, a7
; CHECK-NEXT: andi sp, sp, -64
; CHECK-NEXT: vl8re64.v v24, (a5)
-; CHECK-NEXT: slli a5, a6, 3
-; CHECK-NEXT: addi a6, sp, 64
-; CHECK-NEXT: add a5, a6, a5
-; CHECK-NEXT: mv a7, a2
-; CHECK-NEXT: bltu a2, a4, .LBB22_4
+; CHECK-NEXT: slli a5, t0, 3
+; CHECK-NEXT: addi a7, sp, 64
+; CHECK-NEXT: add a5, a7, a5
+; CHECK-NEXT: bltu a6, a4, .LBB22_4
; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: mv a7, a4
+; CHECK-NEXT: mv a6, a4
; CHECK-NEXT: .LBB22_4:
; CHECK-NEXT: vl8re64.v v0, (a0)
-; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
-; CHECK-NEXT: vse64.v v8, (a6)
+; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
+; CHECK-NEXT: vse64.v v8, (a7)
; CHECK-NEXT: sub a0, a2, a4
-; CHECK-NEXT: add a6, a6, a1
-; CHECK-NEXT: sub a7, a3, a4
+; CHECK-NEXT: add a7, a7, a1
+; CHECK-NEXT: sub a6, a3, a4
; CHECK-NEXT: sltu a2, a2, a0
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a2, a2, a0
-; CHECK-NEXT: sltu a0, a3, a7
+; CHECK-NEXT: sltu a0, a3, a6
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a7
-; CHECK-NEXT: add a7, a5, a1
+; CHECK-NEXT: and a0, a0, a6
+; CHECK-NEXT: add a6, a5, a1
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vse64.v v16, (a6)
+; CHECK-NEXT: vse64.v v16, (a7)
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vse64.v v24, (a7)
+; CHECK-NEXT: vse64.v v24, (a6)
; CHECK-NEXT: bltu a3, a4, .LBB22_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a3, a4
@@ -539,48 +539,48 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64_negative_offset(<vscale x 16
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: slli a1, a4, 3
-; CHECK-NEXT: slli a7, a4, 1
-; CHECK-NEXT: addi a7, a7, -1
+; CHECK-NEXT: slli a6, a4, 1
+; CHECK-NEXT: addi a6, a6, -1
; CHECK-NEXT: add a5, a0, a1
-; CHECK-NEXT: mv a6, a2
-; CHECK-NEXT: bltu a2, a7, .LBB23_2
+; CHECK-NEXT: mv a7, a2
+; CHECK-NEXT: mv t0, a2
+; CHECK-NEXT: bltu a2, a6, .LBB23_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a6, a7
+; CHECK-NEXT: mv t0, a6
; CHECK-NEXT: .LBB23_2:
; CHECK-NEXT: addi sp, sp, -80
; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; CHECK-NEXT: addi s0, sp, 80
-; CHECK-NEXT: csrr a7, vlenb
-; CHECK-NEXT: slli a7, a7, 5
-; CHECK-NEXT: sub sp, sp, a7
+; CHECK-NEXT: csrr a6, vlenb
+; CHECK-NEXT: slli a6, a6, 5
+; CHECK-NEXT: sub sp, sp, a6
; CHECK-NEXT: andi sp, sp, -64
; CHECK-NEXT: vl8re64.v v24, (a5)
-; CHECK-NEXT: slli a5, a6, 3
-; CHECK-NEXT: addi a7, sp, 64
-; CHECK-NEXT: add a6, a7, a5
-; CHECK-NEXT: mv t0, a2
-; CHECK-NEXT: bltu a2, a4, .LBB23_4
+; CHECK-NEXT: slli a5, t0, 3
+; CHECK-NEXT: addi t0, sp, 64
+; CHECK-NEXT: add a6, t0, a5
+; CHECK-NEXT: bltu a7, a4, .LBB23_4
; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: mv t0, a4
+; CHECK-NEXT: mv a7, a4
; CHECK-NEXT: .LBB23_4:
; CHECK-NEXT: vl8re64.v v0, (a0)
-; CHECK-NEXT: vsetvli zero, t0, e64, m8, ta, ma
-; CHECK-NEXT: vse64.v v8, (a7)
+; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
+; CHECK-NEXT: vse64.v v8, (t0)
; CHECK-NEXT: sub a0, a2, a4
-; CHECK-NEXT: add a7, a7, a1
-; CHECK-NEXT: sub t0, a3, a4
+; CHECK-NEXT: add t0, t0, a1
+; CHECK-NEXT: sub a7, a3, a4
; CHECK-NEXT: sltu a2, a2, a0
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a2, a2, a0
-; CHECK-NEXT: sltu a0, a3, t0
+; CHECK-NEXT: sltu a0, a3, a7
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, t0
-; CHECK-NEXT: add t0, a6, a1
+; CHECK-NEXT: and a0, a0, a7
+; CHECK-NEXT: add a7, a6, a1
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vse64.v v16, (a7)
+; CHECK-NEXT: vse64.v v16, (t0)
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vse64.v v24, (t0)
+; CHECK-NEXT: vse64.v v24, (a7)
; CHECK-NEXT: bltu a3, a4, .LBB23_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a3, a4
diff --git a/llvm/test/CodeGen/X86/bmi-select-distrib.ll b/llvm/test/CodeGen/X86/bmi-select-distrib.ll
index e5696ded4fbf1..021b9747795ef 100644
--- a/llvm/test/CodeGen/X86/bmi-select-distrib.ll
+++ b/llvm/test/CodeGen/X86/bmi-select-distrib.ll
@@ -75,14 +75,14 @@ define i64 @and_select_neg_to_blsi_i64(i1 %a0, i64 %a1) nounwind {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: xorl %edx, %edx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %esi, %eax
; X86-NEXT: negl %eax
-; X86-NEXT: sbbl %esi, %edx
-; X86-NEXT: andl %esi, %edx
-; X86-NEXT: andl %ecx, %eax
+; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: andl %ecx, %edx
+; X86-NEXT: andl %esi, %eax
; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
-; X86-NEXT: cmovel %esi, %edx
-; X86-NEXT: cmovel %ecx, %eax
+; X86-NEXT: cmovel %ecx, %edx
+; X86-NEXT: cmovel %esi, %eax
; X86-NEXT: popl %esi
; X86-NEXT: retl
;
@@ -399,15 +399,15 @@ define i64 @and_select_sub_1_to_blsr_i64(i1 %a0, i64 %a1) nounwind {
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $-1, %eax
-; X86-NEXT: movl %esi, %edx
+; X86-NEXT: movl %ecx, %edx
; X86-NEXT: adcl $-1, %edx
-; X86-NEXT: andl %esi, %edx
-; X86-NEXT: andl %ecx, %eax
+; X86-NEXT: andl %ecx, %edx
+; X86-NEXT: andl %esi, %eax
; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
-; X86-NEXT: cmovel %ecx, %eax
-; X86-NEXT: cmovel %esi, %edx
+; X86-NEXT: cmovel %esi, %eax
+; X86-NEXT: cmovel %ecx, %edx
; X86-NEXT: popl %esi
; X86-NEXT: retl
;
@@ -691,15 +691,15 @@ define i64 @xor_select_sub_1_to_blsmsk_i64(i1 %a0, i64 %a1) nounwind {
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $-1, %eax
-; X86-NEXT: movl %esi, %edx
+; X86-NEXT: movl %ecx, %edx
; X86-NEXT: adcl $-1, %edx
-; X86-NEXT: xorl %esi, %edx
-; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: xorl %ecx, %edx
+; X86-NEXT: xorl %esi, %eax
; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
-; X86-NEXT: cmovel %ecx, %eax
-; X86-NEXT: cmovel %esi, %edx
+; X86-NEXT: cmovel %esi, %eax
+; X86-NEXT: cmovel %ecx, %edx
; X86-NEXT: popl %esi
; X86-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll
index d151c6f28e51b..9aa0a1dedf594 100644
--- a/llvm/test/CodeGen/X86/insertelement-var-index.ll
+++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll
@@ -1009,19 +1009,18 @@ define <2 x i64> @arg_i64_v2i64(<2 x i64> %v, i64 %x, i32 %y) nounwind {
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $48, %esp
-; X86AVX2-NEXT: movl 8(%ebp), %edx
-; X86AVX2-NEXT: movl 12(%ebp), %eax
-; X86AVX2-NEXT: movl 16(%ebp), %ecx
+; X86AVX2-NEXT: movl 16(%ebp), %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: movl 12(%ebp), %edx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
-; X86AVX2-NEXT: addl %ecx, %ecx
-; X86AVX2-NEXT: movl %ecx, %esi
+; X86AVX2-NEXT: leal (%eax,%eax), %esi
; X86AVX2-NEXT: andl $3, %esi
-; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
+; X86AVX2-NEXT: movl %ecx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: incl %ecx
-; X86AVX2-NEXT: andl $3, %ecx
-; X86AVX2-NEXT: movl %eax, 16(%esp,%ecx,4)
+; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT: andl $3, %eax
+; X86AVX2-NEXT: movl %edx, 16(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
@@ -1363,13 +1362,12 @@ define <2 x i64> @load_i64_v2i64(<2 x i64> %v, ptr %p, i32 %y) nounwind {
; X86AVX2-NEXT: movl (%ecx), %edx
; X86AVX2-NEXT: movl 4(%ecx), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
-; X86AVX2-NEXT: addl %eax, %eax
-; X86AVX2-NEXT: movl %eax, %esi
+; X86AVX2-NEXT: leal (%eax,%eax), %esi
; X86AVX2-NEXT: andl $3, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: incl %eax
+; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
; X86AVX2-NEXT: andl $3, %eax
; X86AVX2-NEXT: movl %ecx, 16(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
@@ -1744,19 +1742,18 @@ define <4 x i64> @arg_i64_v4i64(<4 x i64> %v, i64 %x, i32 %y) nounwind {
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $96, %esp
-; X86AVX2-NEXT: movl 8(%ebp), %edx
-; X86AVX2-NEXT: movl 12(%ebp), %eax
-; X86AVX2-NEXT: movl 16(%ebp), %ecx
+; X86AVX2-NEXT: movl 16(%ebp), %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: movl 12(%ebp), %edx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X86AVX2-NEXT: addl %ecx, %ecx
-; X86AVX2-NEXT: movl %ecx, %esi
+; X86AVX2-NEXT: leal (%eax,%eax), %esi
; X86AVX2-NEXT: andl $7, %esi
-; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
+; X86AVX2-NEXT: movl %ecx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: incl %ecx
-; X86AVX2-NEXT: andl $7, %ecx
-; X86AVX2-NEXT: movl %eax, 32(%esp,%ecx,4)
+; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT: andl $7, %eax
+; X86AVX2-NEXT: movl %edx, 32(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
@@ -2131,13 +2128,12 @@ define <4 x i64> @load_i64_v4i64(<4 x i64> %v, ptr %p, i32 %y) nounwind {
; X86AVX2-NEXT: movl (%ecx), %edx
; X86AVX2-NEXT: movl 4(%ecx), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X86AVX2-NEXT: addl %eax, %eax
-; X86AVX2-NEXT: movl %eax, %esi
+; X86AVX2-NEXT: leal (%eax,%eax), %esi
; X86AVX2-NEXT: andl $7, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: incl %eax
+; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
; X86AVX2-NEXT: andl $7, %eax
; X86AVX2-NEXT: movl %ecx, 32(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
index 7c9adaf31aff5..1254100e88a82 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
@@ -159,21 +159,21 @@ define <8 x i32> @vec256_i32_unsigned_reg_reg(<8 x i32> %a1, <8 x i32> %a2) noun
define <8 x i32> @vec256_i32_signed_mem_reg(ptr %a1_addr, <8 x i32> %a2) nounwind {
; AVX1-LABEL: vec256_i32_signed_mem_reg:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rdi), %xmm1
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX1-NEXT: vpminsd %xmm0, %xmm1, %xmm3
-; AVX1-NEXT: vpmaxsd %xmm0, %xmm1, %xmm4
-; AVX1-NEXT: vpsubd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vmovdqa (%rdi), %ymm1
+; AVX1-NEXT: vpminsd %xmm0, %xmm1, %xmm2
+; AVX1-NEXT: vpmaxsd %xmm0, %xmm1, %xmm3
+; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpminsd %xmm0, %xmm2, %xmm4
-; AVX1-NEXT: vpmaxsd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpminsd %xmm0, %xmm3, %xmm4
+; AVX1-NEXT: vpmaxsd %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpsubd %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
-; AVX1-NEXT: vpmulld %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpmulld %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpmulld %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -190,20 +190,20 @@ define <8 x i32> @vec256_i32_signed_mem_reg(ptr %a1_addr, <8 x i32> %a2) nounwin
;
; XOP-LABEL: vec256_i32_signed_mem_reg:
; XOP: # %bb.0:
-; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT: vmovdqa (%rdi), %xmm2
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT: vpminsd %xmm1, %xmm3, %xmm4
-; XOP-NEXT: vpmaxsd %xmm1, %xmm3, %xmm1
-; XOP-NEXT: vpsubd %xmm4, %xmm1, %xmm1
-; XOP-NEXT: vpminsd %xmm0, %xmm2, %xmm4
-; XOP-NEXT: vpmaxsd %xmm0, %xmm2, %xmm0
+; XOP-NEXT: vmovdqa (%rdi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT: vpminsd %xmm2, %xmm3, %xmm4
+; XOP-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpsubd %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpminsd %xmm0, %xmm1, %xmm4
+; XOP-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpsubd %xmm4, %xmm0, %xmm0
; XOP-NEXT: vpsrld $1, %xmm0, %xmm0
-; XOP-NEXT: vpsrld $1, %xmm1, %xmm1
-; XOP-NEXT: vpmacsdd %xmm3, %xmm1, %xmm1, %xmm1
-; XOP-NEXT: vpmacsdd %xmm2, %xmm0, %xmm0, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vpsrld $1, %xmm2, %xmm2
+; XOP-NEXT: vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpmacsdd %xmm1, %xmm0, %xmm0, %xmm0
+; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX512-LABEL: vec256_i32_signed_mem_reg:
@@ -303,23 +303,23 @@ define <8 x i32> @vec256_i32_signed_reg_mem(<8 x i32> %a1, ptr %a2_addr) nounwin
define <8 x i32> @vec256_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; AVX1-LABEL: vec256_i32_signed_mem_mem:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %ymm0
-; AVX1-NEXT: vmovdqa (%rdi), %xmm1
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2
-; AVX1-NEXT: vpminsd %xmm0, %xmm1, %xmm3
-; AVX1-NEXT: vpmaxsd %xmm0, %xmm1, %xmm4
-; AVX1-NEXT: vpsubd %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpminsd %xmm0, %xmm2, %xmm4
-; AVX1-NEXT: vpmaxsd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpsubd %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
-; AVX1-NEXT: vpmulld %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpmulld %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovdqa (%rdi), %ymm0
+; AVX1-NEXT: vmovdqa (%rsi), %ymm1
+; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm3
+; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpminsd %xmm1, %xmm3, %xmm4
+; AVX1-NEXT: vpmaxsd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpmulld %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec256_i32_signed_mem_mem:
@@ -336,20 +336,20 @@ define <8 x i32> @vec256_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
;
; XOP-LABEL: vec256_i32_signed_mem_mem:
; XOP: # %bb.0:
-; XOP-NEXT: vmovdqa (%rsi), %ymm0
-; XOP-NEXT: vmovdqa (%rdi), %xmm1
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm2
+; XOP-NEXT: vmovdqa (%rdi), %ymm0
+; XOP-NEXT: vmovdqa (%rsi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
-; XOP-NEXT: vpminsd %xmm3, %xmm2, %xmm4
-; XOP-NEXT: vpmaxsd %xmm3, %xmm2, %xmm3
-; XOP-NEXT: vpsubd %xmm4, %xmm3, %xmm3
-; XOP-NEXT: vpminsd %xmm0, %xmm1, %xmm4
-; XOP-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
-; XOP-NEXT: vpsubd %xmm4, %xmm0, %xmm0
-; XOP-NEXT: vpsrld $1, %xmm0, %xmm0
-; XOP-NEXT: vpsrld $1, %xmm3, %xmm3
-; XOP-NEXT: vpmacsdd %xmm2, %xmm3, %xmm3, %xmm2
-; XOP-NEXT: vpmacsdd %xmm1, %xmm0, %xmm0, %xmm0
+; XOP-NEXT: vpminsd %xmm2, %xmm3, %xmm4
+; XOP-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpsubd %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpminsd %xmm1, %xmm0, %xmm4
+; XOP-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpsubd %xmm4, %xmm1, %xmm1
+; XOP-NEXT: vpsrld $1, %xmm1, %xmm1
+; XOP-NEXT: vpsrld $1, %xmm2, %xmm2
+; XOP-NEXT: vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
@@ -727,18 +727,18 @@ define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) noun
define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwind {
; AVX1-LABEL: vec256_i64_signed_mem_reg:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa (%rdi), %xmm2
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm5
-; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vmovdqa (%rdi), %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm5
+; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpsubq %xmm0, %xmm5, %xmm0
-; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsubq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm6
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm6
; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm7
; AVX1-NEXT: vpsrlq $33, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1]
@@ -749,19 +749,19 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
; AVX1-NEXT: vpaddq %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX1-NEXT: vpmuludq %xmm7, %xmm9, %xmm5
-; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $33, %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm4, %xmm8, %xmm7
-; AVX1-NEXT: vpmuludq %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpmuludq %xmm7, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm4
; AVX1-NEXT: vpmuludq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm7, %xmm6, %xmm4
; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec256_i64_signed_mem_reg:
@@ -787,18 +787,18 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
;
; XOP-LABEL: vec256_i64_signed_mem_reg:
; XOP: # %bb.0:
-; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT: vmovdqa (%rdi), %xmm2
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT: vpcomgtq %xmm1, %xmm3, %xmm4
-; XOP-NEXT: vpcomgtq %xmm0, %xmm2, %xmm5
-; XOP-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; XOP-NEXT: vmovdqa (%rdi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT: vpcomgtq %xmm2, %xmm3, %xmm4
+; XOP-NEXT: vpcomgtq %xmm0, %xmm1, %xmm5
+; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpxor %xmm5, %xmm0, %xmm0
; XOP-NEXT: vpsubq %xmm0, %xmm5, %xmm0
-; XOP-NEXT: vpsubq %xmm1, %xmm3, %xmm1
-; XOP-NEXT: vpxor %xmm4, %xmm1, %xmm1
-; XOP-NEXT: vpsubq %xmm1, %xmm4, %xmm1
-; XOP-NEXT: vpsrlq $1, %xmm1, %xmm6
+; XOP-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpsubq %xmm2, %xmm4, %xmm2
+; XOP-NEXT: vpsrlq $1, %xmm2, %xmm6
; XOP-NEXT: vpsrlq $1, %xmm0, %xmm7
; XOP-NEXT: vpsrlq $33, %xmm0, %xmm0
; XOP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1]
@@ -809,19 +809,19 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
; XOP-NEXT: vpaddq %xmm0, %xmm5, %xmm0
; XOP-NEXT: vpsllq $32, %xmm0, %xmm0
; XOP-NEXT: vpmuludq %xmm7, %xmm9, %xmm5
-; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
+; XOP-NEXT: vpsrlq $33, %xmm2, %xmm2
; XOP-NEXT: vpor %xmm4, %xmm8, %xmm7
-; XOP-NEXT: vpmuludq %xmm7, %xmm1, %xmm1
+; XOP-NEXT: vpmuludq %xmm7, %xmm2, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm4, %xmm4
; XOP-NEXT: vpmuludq %xmm4, %xmm6, %xmm4
-; XOP-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; XOP-NEXT: vpsllq $32, %xmm1, %xmm1
+; XOP-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm7, %xmm6, %xmm4
; XOP-NEXT: vpaddq %xmm3, %xmm4, %xmm3
-; XOP-NEXT: vpaddq %xmm1, %xmm3, %xmm1
-; XOP-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpaddq %xmm1, %xmm5, %xmm1
+; XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec256_i64_signed_mem_reg:
@@ -897,27 +897,27 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwind {
; AVX1-LABEL: vec256_i64_signed_reg_mem:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa (%rdi), %xmm2
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm5
-; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa (%rdi), %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm3
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsubq %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm6
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm7
-; AVX1-NEXT: vpsrlq $33, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm7
+; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX1-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1]
; AVX1-NEXT: vpor %xmm5, %xmm8, %xmm9
-; AVX1-NEXT: vpmuludq %xmm2, %xmm9, %xmm2
+; AVX1-NEXT: vpmuludq %xmm1, %xmm9, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm7, %xmm9, %xmm5
; AVX1-NEXT: vpsrlq $33, %xmm3, %xmm3
; AVX1-NEXT: vpor %xmm4, %xmm8, %xmm7
@@ -927,11 +927,11 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
; AVX1-NEXT: vpmuludq %xmm7, %xmm6, %xmm4
-; AVX1-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddq %xmm0, %xmm5, %xmm0
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec256_i64_signed_reg_mem:
@@ -957,27 +957,27 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
;
; XOP-LABEL: vec256_i64_signed_reg_mem:
; XOP: # %bb.0:
-; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT: vmovdqa (%rdi), %xmm2
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT: vpcomgtq %xmm3, %xmm1, %xmm4
-; XOP-NEXT: vpcomgtq %xmm2, %xmm0, %xmm5
-; XOP-NEXT: vpsubq %xmm2, %xmm0, %xmm2
-; XOP-NEXT: vpxor %xmm5, %xmm2, %xmm2
-; XOP-NEXT: vpsubq %xmm2, %xmm5, %xmm2
-; XOP-NEXT: vpsubq %xmm3, %xmm1, %xmm3
+; XOP-NEXT: vmovdqa (%rdi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT: vpcomgtq %xmm3, %xmm2, %xmm4
+; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm5
+; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm5, %xmm1
+; XOP-NEXT: vpsubq %xmm3, %xmm2, %xmm3
; XOP-NEXT: vpxor %xmm4, %xmm3, %xmm3
; XOP-NEXT: vpsubq %xmm3, %xmm4, %xmm3
; XOP-NEXT: vpsrlq $1, %xmm3, %xmm6
-; XOP-NEXT: vpsrlq $1, %xmm2, %xmm7
-; XOP-NEXT: vpsrlq $33, %xmm2, %xmm2
+; XOP-NEXT: vpsrlq $1, %xmm1, %xmm7
+; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
; XOP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1]
; XOP-NEXT: vpor %xmm5, %xmm8, %xmm9
-; XOP-NEXT: vpmuludq %xmm2, %xmm9, %xmm2
+; XOP-NEXT: vpmuludq %xmm1, %xmm9, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm5, %xmm5
; XOP-NEXT: vpmuludq %xmm5, %xmm7, %xmm5
-; XOP-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
+; XOP-NEXT: vpaddq %xmm1, %xmm5, %xmm1
+; XOP-NEXT: vpsllq $32, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm7, %xmm9, %xmm5
; XOP-NEXT: vpsrlq $33, %xmm3, %xmm3
; XOP-NEXT: vpor %xmm4, %xmm8, %xmm7
@@ -987,11 +987,11 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
; XOP-NEXT: vpaddq %xmm3, %xmm4, %xmm3
; XOP-NEXT: vpsllq $32, %xmm3, %xmm3
; XOP-NEXT: vpmuludq %xmm7, %xmm6, %xmm4
-; XOP-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; XOP-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; XOP-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; XOP-NEXT: vpaddq %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpaddq %xmm0, %xmm5, %xmm0
-; XOP-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec256_i64_signed_reg_mem:
@@ -1067,42 +1067,42 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; AVX1-LABEL: vec256_i64_signed_mem_mem:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %xmm0
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT: vmovdqa (%rdi), %xmm2
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm5
-; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpsubq %xmm0, %xmm5, %xmm0
-; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsubq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm6
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm7
-; AVX1-NEXT: vpsrlq $33, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa (%rdi), %ymm0
+; AVX1-NEXT: vmovdqa (%rsi), %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm7
+; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX1-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1]
; AVX1-NEXT: vpor %xmm5, %xmm8, %xmm9
-; AVX1-NEXT: vpmuludq %xmm0, %xmm9, %xmm0
+; AVX1-NEXT: vpmuludq %xmm1, %xmm9, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vpaddq %xmm0, %xmm5, %xmm0
-; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm7, %xmm9, %xmm5
-; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $33, %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm4, %xmm8, %xmm7
-; AVX1-NEXT: vpmuludq %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpmuludq %xmm7, %xmm2, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm4
; AVX1-NEXT: vpmuludq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm7, %xmm6, %xmm4
; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpaddq %xmm0, %xmm5, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec256_i64_signed_mem_mem:
@@ -1129,42 +1129,42 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
;
; XOP-LABEL: vec256_i64_signed_mem_mem:
; XOP: # %bb.0:
-; XOP-NEXT: vmovdqa (%rsi), %xmm0
-; XOP-NEXT: vmovdqa 16(%rsi), %xmm1
-; XOP-NEXT: vmovdqa (%rdi), %xmm2
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT: vpcomgtq %xmm1, %xmm3, %xmm4
-; XOP-NEXT: vpcomgtq %xmm0, %xmm2, %xmm5
-; XOP-NEXT: vpsubq %xmm0, %xmm2, %xmm0
-; XOP-NEXT: vpxor %xmm5, %xmm0, %xmm0
-; XOP-NEXT: vpsubq %xmm0, %xmm5, %xmm0
-; XOP-NEXT: vpsubq %xmm1, %xmm3, %xmm1
-; XOP-NEXT: vpxor %xmm4, %xmm1, %xmm1
-; XOP-NEXT: vpsubq %xmm1, %xmm4, %xmm1
-; XOP-NEXT: vpsrlq $1, %xmm1, %xmm6
-; XOP-NEXT: vpsrlq $1, %xmm0, %xmm7
-; XOP-NEXT: vpsrlq $33, %xmm0, %xmm0
+; XOP-NEXT: vmovdqa (%rdi), %ymm0
+; XOP-NEXT: vmovdqa (%rsi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT: vpcomgtq %xmm2, %xmm3, %xmm4
+; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm5
+; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm5, %xmm1
+; XOP-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpsubq %xmm2, %xmm4, %xmm2
+; XOP-NEXT: vpsrlq $1, %xmm2, %xmm6
+; XOP-NEXT: vpsrlq $1, %xmm1, %xmm7
+; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
; XOP-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1]
; XOP-NEXT: vpor %xmm5, %xmm8, %xmm9
-; XOP-NEXT: vpmuludq %xmm0, %xmm9, %xmm0
+; XOP-NEXT: vpmuludq %xmm1, %xmm9, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm5, %xmm5
; XOP-NEXT: vpmuludq %xmm5, %xmm7, %xmm5
-; XOP-NEXT: vpaddq %xmm0, %xmm5, %xmm0
-; XOP-NEXT: vpsllq $32, %xmm0, %xmm0
+; XOP-NEXT: vpaddq %xmm1, %xmm5, %xmm1
+; XOP-NEXT: vpsllq $32, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm7, %xmm9, %xmm5
-; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
+; XOP-NEXT: vpsrlq $33, %xmm2, %xmm2
; XOP-NEXT: vpor %xmm4, %xmm8, %xmm7
-; XOP-NEXT: vpmuludq %xmm7, %xmm1, %xmm1
+; XOP-NEXT: vpmuludq %xmm7, %xmm2, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm4, %xmm4
; XOP-NEXT: vpmuludq %xmm4, %xmm6, %xmm4
-; XOP-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; XOP-NEXT: vpsllq $32, %xmm1, %xmm1
+; XOP-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm7, %xmm6, %xmm4
; XOP-NEXT: vpaddq %xmm3, %xmm4, %xmm3
-; XOP-NEXT: vpaddq %xmm1, %xmm3, %xmm1
-; XOP-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpaddq %xmm0, %xmm5, %xmm0
+; XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec256_i64_signed_mem_mem:
@@ -1499,27 +1499,27 @@ define <16 x i16> @vec256_i16_unsigned_reg_reg(<16 x i16> %a1, <16 x i16> %a2) n
define <16 x i16> @vec256_i16_signed_mem_reg(ptr %a1_addr, <16 x i16> %a2) nounwind {
; AVX1-LABEL: vec256_i16_signed_mem_reg:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa (%rdi), %xmm2
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT: vpcmpgtw %xmm1, %xmm3, %xmm4
-; AVX1-NEXT: vpcmpgtw %xmm0, %xmm2, %xmm5
-; AVX1-NEXT: vpminsw %xmm0, %xmm2, %xmm6
-; AVX1-NEXT: vpmaxsw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vmovdqa (%rdi), %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm5
+; AVX1-NEXT: vpminsw %xmm0, %xmm1, %xmm6
+; AVX1-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpsubw %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vpminsw %xmm1, %xmm3, %xmm6
-; AVX1-NEXT: vpmaxsw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpsubw %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm6
+; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsubw %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec256_i16_signed_mem_reg:
@@ -1537,25 +1537,25 @@ define <16 x i16> @vec256_i16_signed_mem_reg(ptr %a1_addr, <16 x i16> %a2) nounw
;
; XOP-LABEL: vec256_i16_signed_mem_reg:
; XOP: # %bb.0:
-; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT: vmovdqa (%rdi), %xmm2
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT: vpcomgtw %xmm1, %xmm3, %xmm4
-; XOP-NEXT: vpcomgtw %xmm0, %xmm2, %xmm5
-; XOP-NEXT: vpminsw %xmm1, %xmm3, %xmm6
-; XOP-NEXT: vpmaxsw %xmm1, %xmm3, %xmm1
-; XOP-NEXT: vpsubw %xmm6, %xmm1, %xmm1
-; XOP-NEXT: vpminsw %xmm0, %xmm2, %xmm6
-; XOP-NEXT: vpmaxsw %xmm0, %xmm2, %xmm0
+; XOP-NEXT: vmovdqa (%rdi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT: vpcomgtw %xmm2, %xmm3, %xmm4
+; XOP-NEXT: vpcomgtw %xmm0, %xmm1, %xmm5
+; XOP-NEXT: vpminsw %xmm2, %xmm3, %xmm6
+; XOP-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpsubw %xmm6, %xmm2, %xmm2
+; XOP-NEXT: vpminsw %xmm0, %xmm1, %xmm6
+; XOP-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpsubw %xmm6, %xmm0, %xmm0
; XOP-NEXT: vpsrlw $1, %xmm0, %xmm0
-; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT: vpsrlw $1, %xmm2, %xmm2
; XOP-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
; XOP-NEXT: vpor %xmm6, %xmm5, %xmm5
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
-; XOP-NEXT: vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
-; XOP-NEXT: vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpmacsww %xmm1, %xmm5, %xmm0, %xmm0
+; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec256_i16_signed_mem_reg:
@@ -1627,27 +1627,27 @@ define <16 x i16> @vec256_i16_signed_mem_reg(ptr %a1_addr, <16 x i16> %a2) nounw
define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounwind {
; AVX1-LABEL: vec256_i16_signed_reg_mem:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa (%rdi), %xmm2
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm5
-; AVX1-NEXT: vpminsw %xmm2, %xmm0, %xmm6
-; AVX1-NEXT: vpmaxsw %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpsubw %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpminsw %xmm3, %xmm1, %xmm6
-; AVX1-NEXT: vpmaxsw %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa (%rdi), %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm5
+; AVX1-NEXT: vpminsw %xmm1, %xmm0, %xmm6
+; AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpsubw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpminsw %xmm3, %xmm2, %xmm6
+; AVX1-NEXT: vpmaxsw %xmm3, %xmm2, %xmm3
; AVX1-NEXT: vpsubw %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vpmullw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmullw %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec256_i16_signed_reg_mem:
@@ -1665,25 +1665,25 @@ define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounw
;
; XOP-LABEL: vec256_i16_signed_reg_mem:
; XOP: # %bb.0:
-; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT: vmovdqa (%rdi), %xmm2
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT: vpcomgtw %xmm3, %xmm1, %xmm4
-; XOP-NEXT: vpcomgtw %xmm2, %xmm0, %xmm5
-; XOP-NEXT: vpminsw %xmm3, %xmm1, %xmm6
-; XOP-NEXT: vpmaxsw %xmm3, %xmm1, %xmm3
+; XOP-NEXT: vmovdqa (%rdi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT: vpcomgtw %xmm3, %xmm2, %xmm4
+; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm5
+; XOP-NEXT: vpminsw %xmm3, %xmm2, %xmm6
+; XOP-NEXT: vpmaxsw %xmm3, %xmm2, %xmm3
; XOP-NEXT: vpsubw %xmm6, %xmm3, %xmm3
-; XOP-NEXT: vpminsw %xmm2, %xmm0, %xmm6
-; XOP-NEXT: vpmaxsw %xmm2, %xmm0, %xmm2
-; XOP-NEXT: vpsubw %xmm6, %xmm2, %xmm2
-; XOP-NEXT: vpsrlw $1, %xmm2, %xmm2
+; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm6
+; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpsubw %xmm6, %xmm1, %xmm1
+; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm3, %xmm3
; XOP-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
; XOP-NEXT: vpor %xmm6, %xmm5, %xmm5
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
-; XOP-NEXT: vpmacsww %xmm1, %xmm4, %xmm3, %xmm1
-; XOP-NEXT: vpmacsww %xmm0, %xmm5, %xmm2, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vpmacsww %xmm2, %xmm4, %xmm3, %xmm2
+; XOP-NEXT: vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec256_i16_signed_reg_mem:
@@ -1755,28 +1755,28 @@ define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounw
define <16 x i16> @vec256_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; AVX1-LABEL: vec256_i16_signed_mem_mem:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %xmm0
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT: vmovdqa (%rdi), %xmm2
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT: vpcmpgtw %xmm1, %xmm3, %xmm4
-; AVX1-NEXT: vpcmpgtw %xmm0, %xmm2, %xmm5
-; AVX1-NEXT: vpminsw %xmm0, %xmm2, %xmm6
-; AVX1-NEXT: vpmaxsw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpsubw %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vpminsw %xmm1, %xmm3, %xmm6
-; AVX1-NEXT: vpmaxsw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vmovdqa (%rdi), %ymm0
+; AVX1-NEXT: vmovdqa (%rsi), %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm4
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm5
+; AVX1-NEXT: vpminsw %xmm1, %xmm0, %xmm6
+; AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsubw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm6
+; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsubw %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpmullw %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec256_i16_signed_mem_mem:
@@ -1795,26 +1795,26 @@ define <16 x i16> @vec256_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwin
;
; XOP-LABEL: vec256_i16_signed_mem_mem:
; XOP: # %bb.0:
-; XOP-NEXT: vmovdqa (%rsi), %xmm0
-; XOP-NEXT: vmovdqa 16(%rsi), %xmm1
-; XOP-NEXT: vmovdqa (%rdi), %xmm2
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT: vpcomgtw %xmm1, %xmm3, %xmm4
-; XOP-NEXT: vpcomgtw %xmm0, %xmm2, %xmm5
-; XOP-NEXT: vpminsw %xmm1, %xmm3, %xmm6
-; XOP-NEXT: vpmaxsw %xmm1, %xmm3, %xmm1
+; XOP-NEXT: vmovdqa (%rdi), %ymm0
+; XOP-NEXT: vmovdqa (%rsi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT: vpcomgtw %xmm2, %xmm3, %xmm4
+; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm5
+; XOP-NEXT: vpminsw %xmm2, %xmm3, %xmm6
+; XOP-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpsubw %xmm6, %xmm2, %xmm2
+; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm6
+; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm6, %xmm1, %xmm1
-; XOP-NEXT: vpminsw %xmm0, %xmm2, %xmm6
-; XOP-NEXT: vpmaxsw %xmm0, %xmm2, %xmm0
-; XOP-NEXT: vpsubw %xmm6, %xmm0, %xmm0
-; XOP-NEXT: vpsrlw $1, %xmm0, %xmm0
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT: vpsrlw $1, %xmm2, %xmm2
; XOP-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
; XOP-NEXT: vpor %xmm6, %xmm5, %xmm5
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
-; XOP-NEXT: vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
-; XOP-NEXT: vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec256_i16_signed_mem_mem:
@@ -2247,9 +2247,9 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind {
; AVX1-LABEL: vec256_i8_signed_mem_reg:
; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vmovdqa (%rdi), %xmm1
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm5
; AVX1-NEXT: vpminsb %xmm0, %xmm1, %xmm6
@@ -2309,19 +2309,19 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
;
; XOP-LABEL: vec256_i8_signed_mem_reg:
; XOP: # %bb.0:
-; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT: vmovdqa (%rdi), %xmm2
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT: vpcomgtb %xmm1, %xmm3, %xmm4
-; XOP-NEXT: vpcomgtb %xmm0, %xmm2, %xmm5
-; XOP-NEXT: vpminsb %xmm0, %xmm2, %xmm6
-; XOP-NEXT: vpmaxsb %xmm0, %xmm2, %xmm0
+; XOP-NEXT: vmovdqa (%rdi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT: vpcomgtb %xmm2, %xmm3, %xmm4
+; XOP-NEXT: vpcomgtb %xmm0, %xmm1, %xmm5
+; XOP-NEXT: vpminsb %xmm0, %xmm1, %xmm6
+; XOP-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpsubb %xmm6, %xmm0, %xmm0
-; XOP-NEXT: vpminsb %xmm1, %xmm3, %xmm6
-; XOP-NEXT: vpmaxsb %xmm1, %xmm3, %xmm1
-; XOP-NEXT: vpsubb %xmm6, %xmm1, %xmm1
+; XOP-NEXT: vpminsb %xmm2, %xmm3, %xmm6
+; XOP-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpsubb %xmm6, %xmm2, %xmm2
; XOP-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOP-NEXT: vpshlb %xmm6, %xmm1, %xmm1
+; XOP-NEXT: vpshlb %xmm6, %xmm2, %xmm2
; XOP-NEXT: vpshlb %xmm6, %xmm0, %xmm0
; XOP-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; XOP-NEXT: vpor %xmm6, %xmm5, %xmm5
@@ -2334,13 +2334,13 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm0, %xmm0
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6
-; XOP-NEXT: vpmaddubsw %xmm6, %xmm1, %xmm6
+; XOP-NEXT: vpmaddubsw %xmm6, %xmm2, %xmm6
; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4
-; XOP-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm1
-; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-NEXT: vpaddb %xmm3, %xmm1, %xmm1
-; XOP-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm2, %xmm2
+; XOP-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; XOP-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec256_i8_signed_mem_reg:
@@ -2425,9 +2425,9 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind {
; AVX1-LABEL: vec256_i8_signed_reg_mem:
; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa (%rdi), %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa (%rdi), %xmm2
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm5
; AVX1-NEXT: vpminsb %xmm2, %xmm0, %xmm6
@@ -2487,38 +2487,38 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
;
; XOP-LABEL: vec256_i8_signed_reg_mem:
; XOP: # %bb.0:
-; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1
-; XOP-NEXT: vmovdqa (%rdi), %xmm2
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT: vpcomgtb %xmm3, %xmm1, %xmm4
-; XOP-NEXT: vpcomgtb %xmm2, %xmm0, %xmm5
-; XOP-NEXT: vpminsb %xmm2, %xmm0, %xmm6
-; XOP-NEXT: vpmaxsb %xmm2, %xmm0, %xmm2
-; XOP-NEXT: vpsubb %xmm6, %xmm2, %xmm2
-; XOP-NEXT: vpminsb %xmm3, %xmm1, %xmm6
-; XOP-NEXT: vpmaxsb %xmm3, %xmm1, %xmm3
+; XOP-NEXT: vmovdqa (%rdi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT: vpcomgtb %xmm3, %xmm2, %xmm4
+; XOP-NEXT: vpcomgtb %xmm1, %xmm0, %xmm5
+; XOP-NEXT: vpminsb %xmm1, %xmm0, %xmm6
+; XOP-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpsubb %xmm6, %xmm1, %xmm1
+; XOP-NEXT: vpminsb %xmm3, %xmm2, %xmm6
+; XOP-NEXT: vpmaxsb %xmm3, %xmm2, %xmm3
; XOP-NEXT: vpsubb %xmm6, %xmm3, %xmm3
; XOP-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
; XOP-NEXT: vpshlb %xmm6, %xmm3, %xmm3
-; XOP-NEXT: vpshlb %xmm6, %xmm2, %xmm2
+; XOP-NEXT: vpshlb %xmm6, %xmm1, %xmm1
; XOP-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; XOP-NEXT: vpor %xmm6, %xmm5, %xmm5
; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8
-; XOP-NEXT: vpmaddubsw %xmm8, %xmm2, %xmm8
+; XOP-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8
; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5
-; XOP-NEXT: vpmaddubsw %xmm5, %xmm2, %xmm2
+; XOP-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1
; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30]
-; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm2, %xmm2
+; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm1, %xmm1
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6
; XOP-NEXT: vpmaddubsw %xmm6, %xmm3, %xmm6
; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4
; XOP-NEXT: vpmaddubsw %xmm4, %xmm3, %xmm3
; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm3, %xmm3
-; XOP-NEXT: vpaddb %xmm1, %xmm3, %xmm1
-; XOP-NEXT: vpaddb %xmm0, %xmm2, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vpaddb %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec256_i8_signed_reg_mem:
@@ -2603,44 +2603,44 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; AVX1-LABEL: vec256_i8_signed_mem_mem:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %xmm1
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm2
-; AVX1-NEXT: vmovdqa (%rdi), %xmm0
-; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3
-; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm5
-; AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm6
-; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vpsubb %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm6
-; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vmovdqa (%rdi), %ymm0
+; AVX1-NEXT: vmovdqa (%rsi), %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm4
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm5
+; AVX1-NEXT: vpminsb %xmm2, %xmm0, %xmm6
+; AVX1-NEXT: vpmaxsb %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsubb %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpminsb %xmm3, %xmm1, %xmm6
+; AVX1-NEXT: vpmaxsb %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm8
-; AVX1-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8
+; AVX1-NEXT: vpmaddubsw %xmm8, %xmm2, %xmm8
; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8
; AVX1-NEXT: vpandn %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm8, %xmm1
+; AVX1-NEXT: vpmaddubsw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpor %xmm2, %xmm8, %xmm2
; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5
-; AVX1-NEXT: vpmaddubsw %xmm5, %xmm2, %xmm5
+; AVX1-NEXT: vpmaddubsw %xmm5, %xmm3, %xmm5
; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4
-; AVX1-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2
-; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vpmaddubsw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsllw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec256_i8_signed_mem_mem:
@@ -2667,39 +2667,39 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
;
; XOP-LABEL: vec256_i8_signed_mem_mem:
; XOP: # %bb.0:
-; XOP-NEXT: vmovdqa (%rsi), %xmm0
-; XOP-NEXT: vmovdqa 16(%rsi), %xmm1
-; XOP-NEXT: vmovdqa (%rdi), %xmm2
-; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
-; XOP-NEXT: vpcomgtb %xmm1, %xmm3, %xmm4
-; XOP-NEXT: vpcomgtb %xmm0, %xmm2, %xmm5
-; XOP-NEXT: vpminsb %xmm0, %xmm2, %xmm6
-; XOP-NEXT: vpmaxsb %xmm0, %xmm2, %xmm0
-; XOP-NEXT: vpsubb %xmm6, %xmm0, %xmm0
-; XOP-NEXT: vpminsb %xmm1, %xmm3, %xmm6
-; XOP-NEXT: vpmaxsb %xmm1, %xmm3, %xmm1
+; XOP-NEXT: vmovdqa (%rdi), %ymm0
+; XOP-NEXT: vmovdqa (%rsi), %ymm1
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT: vpcomgtb %xmm2, %xmm3, %xmm4
+; XOP-NEXT: vpcomgtb %xmm1, %xmm0, %xmm5
+; XOP-NEXT: vpminsb %xmm1, %xmm0, %xmm6
+; XOP-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubb %xmm6, %xmm1, %xmm1
+; XOP-NEXT: vpminsb %xmm2, %xmm3, %xmm6
+; XOP-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpsubb %xmm6, %xmm2, %xmm2
; XOP-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-NEXT: vpshlb %xmm6, %xmm2, %xmm2
; XOP-NEXT: vpshlb %xmm6, %xmm1, %xmm1
-; XOP-NEXT: vpshlb %xmm6, %xmm0, %xmm0
; XOP-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; XOP-NEXT: vpor %xmm6, %xmm5, %xmm5
; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8
-; XOP-NEXT: vpmaddubsw %xmm8, %xmm0, %xmm8
+; XOP-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8
; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5
-; XOP-NEXT: vpmaddubsw %xmm5, %xmm0, %xmm0
+; XOP-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1
; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30]
-; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm0, %xmm0
+; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm1, %xmm1
; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4
; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6
-; XOP-NEXT: vpmaddubsw %xmm6, %xmm1, %xmm6
+; XOP-NEXT: vpmaddubsw %xmm6, %xmm2, %xmm6
; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4
-; XOP-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm1
-; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-NEXT: vpaddb %xmm3, %xmm1, %xmm1
-; XOP-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm2, %xmm2
+; XOP-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec256_i8_signed_mem_mem:
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
index a4750b4cd4ad0..3c32174b63046 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
@@ -426,58 +426,58 @@ define <32 x i16> @vec512_i16_unsigned_reg_reg(<32 x i16> %a1, <32 x i16> %a2) n
define <32 x i16> @vec512_i16_signed_mem_reg(ptr %a1_addr, <32 x i16> %a2) nounwind {
; AVX512F-LABEL: vec512_i16_signed_mem_reg:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
-; AVX512F-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm5
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT: vpminsw %ymm1, %ymm3, %ymm5
-; AVX512F-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT: vpsubw %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpminsw %ymm0, %ymm2, %ymm5
-; AVX512F-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vpminsw %ymm2, %ymm3, %ymm5
+; AVX512F-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpsubw %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpminsw %ymm0, %ymm1, %ymm5
+; AVX512F-NEXT: vpmaxsw %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vpsubw %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm5
; AVX512F-NEXT: vpxor %xmm6, %xmm6, %xmm6
-; AVX512F-NEXT: vpsubw %ymm1, %ymm6, %ymm1
+; AVX512F-NEXT: vpsubw %ymm2, %ymm6, %ymm2
; AVX512F-NEXT: vpsubw %ymm0, %ymm6, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm5
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm3, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm2, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm2, %ymm3, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm1, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm5
; AVX512VL-FALLBACK-NEXT: vpxor %xmm6, %xmm6, %xmm6
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm6, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm6, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm0, %ymm6, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_mem_reg:
@@ -507,58 +507,58 @@ define <32 x i16> @vec512_i16_signed_mem_reg(ptr %a1_addr, <32 x i16> %a2) nounw
define <32 x i16> @vec512_i16_signed_reg_mem(<32 x i16> %a1, ptr %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i16_signed_reg_mem:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm4
-; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm5
+; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT: vpminsw %ymm3, %ymm1, %ymm5
-; AVX512F-NEXT: vpmaxsw %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT: vpminsw %ymm3, %ymm2, %ymm5
+; AVX512F-NEXT: vpmaxsw %ymm3, %ymm2, %ymm3
; AVX512F-NEXT: vpsubw %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT: vpminsw %ymm2, %ymm0, %ymm5
-; AVX512F-NEXT: vpmaxsw %ymm2, %ymm0, %ymm2
-; AVX512F-NEXT: vpsubw %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT: vpsubw %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm5
; AVX512F-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX512F-NEXT: vpsubw %ymm3, %ymm6, %ymm3
-; AVX512F-NEXT: vpsubw %ymm2, %ymm6, %ymm2
-; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512F-NEXT: vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5))
-; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm3
-; AVX512F-NEXT: vpaddw %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT: vpaddw %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpsubw %ymm1, %ymm6, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512F-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT: vpaddw %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm3, %ymm2, %ymm4
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm3, %ymm1, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm3, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm3, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm3, %ymm2, %ymm3
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm2, %ymm0, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm2, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm5
; AVX512VL-FALLBACK-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm3, %ymm6, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm6, %ymm2
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5))
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm1, %ymm3, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm2, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm6, %ymm1
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_reg_mem:
@@ -588,60 +588,60 @@ define <32 x i16> @vec512_i16_signed_reg_mem(<32 x i16> %a1, ptr %a2_addr) nounw
define <32 x i16> @vec512_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i16_signed_mem_mem:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
-; AVX512F-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512F-NEXT: vmovdqa64 (%rsi), %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT: vpminsw %ymm1, %ymm3, %ymm5
-; AVX512F-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpminsw %ymm2, %ymm3, %ymm5
+; AVX512F-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpsubw %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512F-NEXT: vpsubw %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpminsw %ymm0, %ymm2, %ymm5
-; AVX512F-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT: vpsubw %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm5
; AVX512F-NEXT: vpxor %xmm6, %xmm6, %xmm6
+; AVX512F-NEXT: vpsubw %ymm2, %ymm6, %ymm2
; AVX512F-NEXT: vpsubw %ymm1, %ymm6, %ymm1
-; AVX512F-NEXT: vpsubw %ymm0, %ymm6, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-NEXT: vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_mem_mem:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VL-FALLBACK-NEXT: vmovdqa64 (%rsi), %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm3, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm2, %ymm3, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm2, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm5
; AVX512VL-FALLBACK-NEXT: vpxor %xmm6, %xmm6, %xmm6
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm6, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm6, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm0, %ymm6, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_mem_mem:
@@ -849,66 +849,66 @@ define <64 x i8> @vec512_i8_unsigned_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounw
define <64 x i8> @vec512_i8_signed_mem_reg(ptr %a1_addr, <64 x i8> %a2) nounwind {
; AVX512F-LABEL: vec512_i8_signed_mem_reg:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm4
-; AVX512F-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm5
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT: vpminsb %ymm1, %ymm3, %ymm5
-; AVX512F-NEXT: vpmaxsb %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT: vpsubb %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpminsb %ymm0, %ymm2, %ymm5
-; AVX512F-NEXT: vpmaxsb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vpminsb %ymm2, %ymm3, %ymm5
+; AVX512F-NEXT: vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpsubb %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpminsb %ymm0, %ymm1, %ymm5
+; AVX512F-NEXT: vpmaxsb %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vpsubb %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm5
; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpandq %zmm6, %zmm5, %zmm5
-; AVX512F-NEXT: vpand %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpxor %xmm7, %xmm7, %xmm7
-; AVX512F-NEXT: vpsubb %ymm1, %ymm7, %ymm1
+; AVX512F-NEXT: vpsubb %ymm2, %ymm7, %ymm2
; AVX512F-NEXT: vpand %ymm6, %ymm0, %ymm0
; AVX512F-NEXT: vpsubb %ymm0, %ymm7, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT: vpaddb %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT: vpaddb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm4
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm5
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm3, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm3, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm0, %ymm2, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm3, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm0, %ymm1, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm5
; AVX512VL-FALLBACK-NEXT: vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512VL-FALLBACK-NEXT: vpandq %zmm6, %zmm5, %zmm5
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpxor %xmm7, %xmm7, %xmm7
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm7, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm7, %ymm2
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm0, %ymm7, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm3, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_signed_mem_reg:
@@ -939,66 +939,66 @@ define <64 x i8> @vec512_i8_signed_mem_reg(ptr %a1_addr, <64 x i8> %a2) nounwind
define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, ptr %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i8_signed_reg_mem:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
-; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5
+; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT: vpminsb %ymm3, %ymm1, %ymm5
-; AVX512F-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT: vpminsb %ymm3, %ymm2, %ymm5
+; AVX512F-NEXT: vpmaxsb %ymm3, %ymm2, %ymm3
; AVX512F-NEXT: vpsubb %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT: vpminsb %ymm2, %ymm0, %ymm5
-; AVX512F-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2
-; AVX512F-NEXT: vpsubb %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT: vpsubb %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm5
; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpandq %zmm6, %zmm5, %zmm5
; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpxor %xmm7, %xmm7, %xmm7
; AVX512F-NEXT: vpsubb %ymm3, %ymm7, %ymm3
-; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT: vpsubb %ymm2, %ymm7, %ymm2
-; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512F-NEXT: vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5))
-; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm3
-; AVX512F-NEXT: vpaddb %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpand %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT: vpsubb %ymm1, %ymm7, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512F-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vmovdqa64 (%rdi), %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm4
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm1, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm2, %ymm3
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm0, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm5
; AVX512VL-FALLBACK-NEXT: vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512VL-FALLBACK-NEXT: vpandq %zmm6, %zmm5, %zmm5
; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512VL-FALLBACK-NEXT: vpxor %xmm7, %xmm7, %xmm7
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm3, %ymm7, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm7, %ymm2
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm2 = zmm5 ^ (zmm4 & (zmm2 ^ zmm5))
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm2, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm1, %ymm3, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm2, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm7, %ymm1
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_signed_reg_mem:
@@ -1029,68 +1029,68 @@ define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, ptr %a2_addr) nounwind
define <64 x i8> @vec512_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i8_signed_mem_mem:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm4
-; AVX512F-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512F-NEXT: vmovdqa64 (%rsi), %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5
; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512F-NEXT: vpminsb %ymm1, %ymm3, %ymm5
-; AVX512F-NEXT: vpmaxsb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpminsb %ymm2, %ymm3, %ymm5
+; AVX512F-NEXT: vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpsubb %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512F-NEXT: vpsubb %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpminsb %ymm0, %ymm2, %ymm5
-; AVX512F-NEXT: vpmaxsb %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT: vpsubb %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm5
; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpandq %zmm6, %zmm5, %zmm5
-; AVX512F-NEXT: vpand %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpxor %xmm7, %xmm7, %xmm7
+; AVX512F-NEXT: vpsubb %ymm2, %ymm7, %ymm2
+; AVX512F-NEXT: vpand %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpsubb %ymm1, %ymm7, %ymm1
-; AVX512F-NEXT: vpand %ymm6, %ymm0, %ymm0
-; AVX512F-NEXT: vpsubb %ymm0, %ymm7, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-NEXT: vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT: vpaddb %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vpaddb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_mem_mem:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VL-FALLBACK-NEXT: vmovdqa64 (%rsi), %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm4
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm3, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm3, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm0, %ymm2, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm0, %ymm2, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm5
; AVX512VL-FALLBACK-NEXT: vpbroadcastd {{.*#+}} zmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512VL-FALLBACK-NEXT: vpandq %zmm6, %zmm5, %zmm5
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpxor %xmm7, %xmm7, %xmm7
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm7, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm7, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm0, %ymm7, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm0 = zmm5 ^ (zmm4 & (zmm0 ^ zmm5))
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpternlogq {{.*#+}} zmm1 = zmm5 ^ (zmm4 & (zmm1 ^ zmm5))
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm3, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_signed_mem_mem: