[llvm] Reapply "[AMDGPU] Elide bitcast fold i64 imm to build_vector" (#160325) (PR #184114)
Janek van Oirschot via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 2 04:53:31 PST 2026
https://github.com/JanekvO created https://github.com/llvm/llvm-project/pull/184114
Reapplies commit 341cdbc9703d3cdd151f897b63548387f0017f49
From a2510d6ea9982b0edd2666c70c1cc050ecfa6dad Mon Sep 17 00:00:00 2001
From: Janek van Oirschot <janek.vanoirschot at amd.com>
Date: Mon, 2 Feb 2026 18:08:48 +0000
Subject: [PATCH] Reapply "[AMDGPU] Elide bitcast fold i64 imm to build_vector"
(#160325)
Reapplies commit 341cdbc9703d3cdd151f897b63548387f0017f49
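
For context, the fold being elided: on subtargets with v_mov_b64 (e.g.
gfx942), a 64-bit immediate that is an inline constant or fits in 32 bits no
longer needs to be split into a v2i32 BUILD_VECTOR of two 32-bit halves;
isInt64ImmLegal lets the bitcast combine bail out so the constant can be
materialized with a single v_mov_b64. A minimal IR sketch of the kind of code
the updated tests cover (the function name is illustrative, not taken from
the patch):

  ; illustrative reproducer, not a test from this patch
  define void @store_i64_imm(ptr addrspace(5) %arg) {
    ; before: v_mov_b32_e32 v2, 15 ; v_mov_b32_e32 v3, 0
    ; after:  v_mov_b64_e32 v[2:3], 15
    store i64 15, ptr addrspace(5) %arg
    ret void
  }

Since such constants now survive as 64-bit nodes, the SIISelLowering change
adds a companion combine that folds EXTRACT_VECTOR_ELT of (v2i32 bitcast
(i64/f64 constant)) straight to the selected 32-bit half of the constant.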
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 28 ++++++++++++
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h | 3 ++
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 25 ++++++++++-
.../AMDGPU/av-split-dead-valno-crash.ll | 44 ++++++++++---------
llvm/test/CodeGen/AMDGPU/flat-scratch.ll | 6 +--
llvm/test/CodeGen/AMDGPU/imm.ll | 39 ++++++----------
.../siloadstoreopt-misaligned-regsequence.ll | 11 +++--
7 files changed, 99 insertions(+), 57 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 747c223d4781c..752ac6df358be 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -5507,6 +5507,30 @@ SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
}
+bool AMDGPUTargetLowering::isInt64ImmLegal(SDNode *N, SelectionDAG &DAG) const {
+ if (!Subtarget->isGCN())
+ return false;
+
+ ConstantSDNode *SDConstant = dyn_cast<ConstantSDNode>(N);
+ ConstantFPSDNode *SDFPConstant = dyn_cast<ConstantFPSDNode>(N);
+ auto &ST = DAG.getSubtarget<GCNSubtarget>();
+ const auto *TII = ST.getInstrInfo();
+
+ if (!ST.hasMovB64() || (!SDConstant && !SDFPConstant))
+ return false;
+
+ if (ST.has64BitLiterals())
+ return true;
+
+ if (SDConstant) {
+ const APInt &APVal = SDConstant->getAPIntValue();
+ return isUInt<32>(APVal.getZExtValue()) || TII->isInlineConstant(APVal);
+ }
+
+ APInt Val = SDFPConstant->getValueAPF().bitcastToAPInt();
+ return isUInt<32>(Val.getZExtValue()) || TII->isInlineConstant(Val);
+}
+
SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -5556,6 +5580,8 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Src = N->getOperand(0);
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
SDLoc SL(N);
+ if (isInt64ImmLegal(C, DAG))
+ break;
uint64_t CVal = C->getZExtValue();
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
@@ -5566,6 +5592,8 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
const APInt &Val = C->getValueAPF().bitcastToAPInt();
SDLoc SL(N);
+ if (isInt64ImmLegal(C, DAG))
+ break;
uint64_t CVal = Val.getZExtValue();
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index b4d1cd921e601..6401e4bdb7ea2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -107,6 +107,9 @@ class AMDGPUTargetLowering : public TargetLowering {
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
protected:
+ /// Check whether value Val can be supported by v_mov_b64, for the current
+ /// target.
+ bool isInt64ImmLegal(SDNode *Val, SelectionDAG &DAG) const;
bool shouldCombineMemoryType(EVT VT) const;
SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 2f993f4232766..fd1ef0f581a4d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -15804,13 +15804,36 @@ SITargetLowering::performExtractVectorEltCombine(SDNode *N,
return V;
}
+ // EXTRACT_VECTOR_ELT (v2i32 bitcast (i64/f64:k), Idx)
+ // =>
+ // i32:Lo(k) if Idx == 0, or
+ // i32:Hi(k) if Idx == 1
+ auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (Vec.getOpcode() == ISD::BITCAST && VecVT == MVT::v2i32 && Idx) {
+ SDLoc SL(N);
+ SDValue PeekThrough = Vec.getOperand(0);
+ auto *KImm = dyn_cast<ConstantSDNode>(PeekThrough);
+ if (KImm && KImm->getValueType(0).getSizeInBits() == 64) {
+ uint64_t KImmValue = KImm->getZExtValue();
+ return DAG.getConstant(
+ (KImmValue >> (32 * Idx->getZExtValue())) & 0xffffffff, SL, MVT::i32);
+ }
+ auto *KFPImm = dyn_cast<ConstantFPSDNode>(PeekThrough);
+ if (KFPImm && KFPImm->getValueType(0).getSizeInBits() == 64) {
+ uint64_t KFPImmValue =
+ KFPImm->getValueAPF().bitcastToAPInt().getZExtValue();
+ return DAG.getConstant((KFPImmValue >> (32 * Idx->getZExtValue())) &
+ 0xffffffff,
+ SL, MVT::i32);
+ }
+ }
+
if (!DCI.isBeforeLegalize())
return SDValue();
// Try to turn sub-dword accesses of vectors into accesses of the same 32-bit
// elements. This exposes more load reduction opportunities by replacing
// multiple small extract_vector_elements with a single 32-bit extract.
- auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (isa<MemSDNode>(Vec) && VecEltSize <= 16 && VecEltVT.isByteSized() &&
VecSize > 32 && VecSize % 32 == 0 && Idx) {
EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
diff --git a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
index 42f76c4a10d2a..8d2e1c8b89ffd 100644
--- a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
+++ b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
@@ -7,8 +7,8 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: s_load_dword s0, s[4:5], 0x8
; CHECK-NEXT: s_load_dwordx2 s[10:11], s[4:5], 0x0
; CHECK-NEXT: s_load_dwordx4 s[12:15], s[4:5], 0x10
-; CHECK-NEXT: v_mov_b32_e32 v20, 0
-; CHECK-NEXT: v_mov_b32_e32 v1, 0x3e21eeb6
+; CHECK-NEXT: v_mov_b32_e32 v30, 0x9037ab78
+; CHECK-NEXT: v_mov_b32_e32 v31, 0x3e21eeb6
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_bitcmp1_b32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[16:17], -1, 0
@@ -19,7 +19,6 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: s_xor_b64 s[20:21], s[2:3], -1
; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; CHECK-NEXT: s_and_b64 s[2:3], exec, s[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v0, 0x9037ab78
; CHECK-NEXT: v_mov_b32_e32 v2, 0xa17f65f6
; CHECK-NEXT: v_mov_b32_e32 v3, 0xbe927e4f
; CHECK-NEXT: v_mov_b32_e32 v4, 0x19f4ec90
@@ -41,8 +40,9 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: v_mov_b32_e32 v18, 0x55555523
; CHECK-NEXT: v_mov_b32_e32 v19, 0xbfd55555
; CHECK-NEXT: s_and_b64 s[6:7], exec, s[18:19]
-; CHECK-NEXT: v_mov_b32_e32 v21, v20
-; CHECK-NEXT: ; implicit-def: $vgpr30_vgpr31
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b64_e32 v[20:21], 0
+; CHECK-NEXT: ; implicit-def: $agpr0_agpr1
; CHECK-NEXT: ; implicit-def: $vgpr22_vgpr23
; CHECK-NEXT: s_branch .LBB0_2
; CHECK-NEXT: .LBB0_1: ; %Flow9
@@ -62,10 +62,10 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[14:15]
; CHECK-NEXT: flat_load_dwordx2 v[24:25], v[24:25]
-; CHECK-NEXT: v_mov_b64_e32 v[26:27], v[0:1]
+; CHECK-NEXT: v_mov_b64_e32 v[26:27], v[30:31]
; CHECK-NEXT: v_mov_b64_e32 v[28:29], v[2:3]
-; CHECK-NEXT: v_accvgpr_write_b32 a0, 0
-; CHECK-NEXT: v_accvgpr_write_b32 a1, 0
+; CHECK-NEXT: v_accvgpr_write_b32 a2, 0
+; CHECK-NEXT: v_accvgpr_write_b32 a3, 0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[24:25]
; CHECK-NEXT: v_fmac_f64_e32 v[28:29], 0, v[26:27]
@@ -93,30 +93,32 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: .LBB0_6: ; %.preheader1855.i.i.i3329
; CHECK-NEXT: ; Parent Loop BB0_2 Depth=1
; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
-; CHECK-NEXT: v_accvgpr_read_b32 v27, a1
-; CHECK-NEXT: v_accvgpr_read_b32 v26, a0
+; CHECK-NEXT: v_accvgpr_read_b32 v27, a3
+; CHECK-NEXT: v_accvgpr_read_b32 v26, a2
; CHECK-NEXT: s_mov_b64 s[24:25], -1
; CHECK-NEXT: s_mov_b64 s[8:9], -1
; CHECK-NEXT: s_mov_b64 vcc, s[2:3]
-; CHECK-NEXT: ; implicit-def: $agpr0_agpr1
+; CHECK-NEXT: ; implicit-def: $agpr2_agpr3
; CHECK-NEXT: s_cbranch_vccz .LBB0_5
; CHECK-NEXT: ; %bb.7: ; %.lr.ph2070.i.i.i3291
; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2
-; CHECK-NEXT: v_accvgpr_write_b32 a0, v30
-; CHECK-NEXT: v_accvgpr_write_b32 a1, v31
+; CHECK-NEXT: v_accvgpr_mov_b32 a3, a1
+; CHECK-NEXT: v_accvgpr_mov_b32 a2, a0
; CHECK-NEXT: s_mov_b64 s[8:9], s[18:19]
; CHECK-NEXT: s_mov_b64 vcc, s[6:7]
; CHECK-NEXT: s_cbranch_vccz .LBB0_5
; CHECK-NEXT: ; %bb.8: ; %.preheader1856.preheader.i.i.i3325
; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2
-; CHECK-NEXT: v_accvgpr_write_b32 a0, v28
+; CHECK-NEXT: v_accvgpr_write_b32 a2, v28
; CHECK-NEXT: s_mov_b64 s[24:25], 0
-; CHECK-NEXT: v_accvgpr_write_b32 a1, v29
+; CHECK-NEXT: v_accvgpr_write_b32 a3, v29
; CHECK-NEXT: s_mov_b64 s[8:9], 0
; CHECK-NEXT: s_branch .LBB0_5
; CHECK-NEXT: .LBB0_9: ; in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[10:11]
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v24
; CHECK-NEXT: s_mov_b64 s[22:23], 0
-; CHECK-NEXT: v_mov_b64_e32 v[30:31], s[10:11]
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v25
; CHECK-NEXT: s_mov_b64 s[8:9], s[20:21]
; CHECK-NEXT: s_branch .LBB0_15
; CHECK-NEXT: .LBB0_10: ; in Loop: Header=BB0_2 Depth=1
@@ -130,22 +132,24 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: ; %bb.12: ; %._crit_edge2105.i.i.i2330.loopexit
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], 0, v[26:27]
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v24
; CHECK-NEXT: v_cndmask_b32_e64 v23, v23, 0, s[16:17]
-; CHECK-NEXT: v_cndmask_b32_e64 v22, v22, 0, s[16:17]
; CHECK-NEXT: v_cndmask_b32_e64 v26, 0, 1, s[8:9]
; CHECK-NEXT: v_mov_b32_e32 v27, v26
; CHECK-NEXT: s_and_b64 s[8:9], exec, s[16:17]
-; CHECK-NEXT: global_store_dwordx2 v20, v[26:27], s[12:13]
+; CHECK-NEXT: v_cndmask_b32_e64 v22, v22, 0, s[16:17]
+; CHECK-NEXT: global_store_dwordx2 v0, v[26:27], s[12:13]
; CHECK-NEXT: s_cselect_b32 s23, s23, 0
; CHECK-NEXT: s_cselect_b32 s22, s22, 0
; CHECK-NEXT: s_mov_b64 s[8:9], -1
; CHECK-NEXT: s_branch .LBB0_14
; CHECK-NEXT: .LBB0_13: ; in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v24
; CHECK-NEXT: s_mov_b64 s[8:9], 0
; CHECK-NEXT: v_mov_b64_e32 v[22:23], 0
; CHECK-NEXT: .LBB0_14: ; %Flow6
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: v_mov_b64_e32 v[30:31], v[24:25]
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v25
; CHECK-NEXT: .LBB0_15: ; %Flow6
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: s_mov_b64 s[24:25], -1
@@ -154,7 +158,7 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
; CHECK-NEXT: ; %bb.16: ; %._crit_edge2105.i.i.i2330
; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT: s_mov_b64 s[24:25], 0
-; CHECK-NEXT: global_store_dwordx2 v20, v[20:21], s[12:13]
+; CHECK-NEXT: global_store_dwordx2 v0, v[20:21], s[12:13]
; CHECK-NEXT: s_branch .LBB0_1
; CHECK-NEXT: .LBB0_17: ; %DummyReturnBlock
; CHECK-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index eb8c65280b998..3ab0521a08061 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -4152,8 +4152,7 @@ define void @store_load_i64_aligned(ptr addrspace(5) nocapture %arg) {
; GFX942-LABEL: store_load_i64_aligned:
; GFX942: ; %bb.0: ; %bb
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_mov_b32_e32 v2, 15
-; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], 15
; GFX942-NEXT: scratch_store_dwordx2 v0, v[2:3], off sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: scratch_load_dwordx2 v[0:1], v0, off sc0 sc1
@@ -4263,8 +4262,7 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) {
; GFX942-LABEL: store_load_i64_unaligned:
; GFX942: ; %bb.0: ; %bb
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_mov_b32_e32 v2, 15
-; GFX942-NEXT: v_mov_b32_e32 v3, 0
+; GFX942-NEXT: v_mov_b64_e32 v[2:3], 15
; GFX942-NEXT: scratch_store_dwordx2 v0, v[2:3], off sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: scratch_load_dwordx2 v[0:1], v0, off sc0 sc1
diff --git a/llvm/test/CodeGen/AMDGPU/imm.ll b/llvm/test/CodeGen/AMDGPU/imm.ll
index c6e077333ecff..aea3375f0e8bd 100644
--- a/llvm/test/CodeGen/AMDGPU/imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/imm.ll
@@ -1969,10 +1969,9 @@ define amdgpu_kernel void @add_inline_imm_neg_1_f64(ptr addrspace(1) %out, [8 x
; GFX942-LABEL: add_inline_imm_neg_1_f64:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GFX942-NEXT: v_mov_b32_e32 v0, -1
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v1, v0
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], -1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2009,8 +2008,7 @@ define amdgpu_kernel void @add_inline_imm_neg_2_f64(ptr addrspace(1) %out, [8 x
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, -2
-; GFX942-NEXT: v_mov_b32_e32 v1, -1
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], -2
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2047,8 +2045,7 @@ define amdgpu_kernel void @add_inline_imm_neg_16_f64(ptr addrspace(1) %out, [8 x
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, -16
-; GFX942-NEXT: v_mov_b32_e32 v1, -1
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], -16
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2163,10 +2160,9 @@ define amdgpu_kernel void @store_inline_imm_0.0_f64(ptr addrspace(1) %out) {
; GFX942-LABEL: store_inline_imm_0.0_f64:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v1, v0
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], 0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2239,8 +2235,7 @@ define amdgpu_kernel void @store_inline_imm_0.5_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, 0
-; GFX942-NEXT: v_mov_b32_e32 v1, 0x3fe00000
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], 0.5
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2276,8 +2271,7 @@ define amdgpu_kernel void @store_inline_imm_m_0.5_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, 0
-; GFX942-NEXT: v_mov_b32_e32 v1, 0xbfe00000
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], -0.5
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2313,8 +2307,7 @@ define amdgpu_kernel void @store_inline_imm_1.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, 0
-; GFX942-NEXT: v_mov_b32_e32 v1, 0x3ff00000
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], 1.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2350,8 +2343,7 @@ define amdgpu_kernel void @store_inline_imm_m_1.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, 0
-; GFX942-NEXT: v_mov_b32_e32 v1, 0xbff00000
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], -1.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2387,8 +2379,7 @@ define amdgpu_kernel void @store_inline_imm_2.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, 0
-; GFX942-NEXT: v_mov_b32_e32 v1, 2.0
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], 2.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2424,8 +2415,7 @@ define amdgpu_kernel void @store_inline_imm_m_2.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, 0
-; GFX942-NEXT: v_mov_b32_e32 v1, -2.0
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], -2.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2461,8 +2451,7 @@ define amdgpu_kernel void @store_inline_imm_4.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, 0
-; GFX942-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], 4.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2498,8 +2487,7 @@ define amdgpu_kernel void @store_inline_imm_m_4.0_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, 0
-; GFX942-NEXT: v_mov_b32_e32 v1, 0xc0100000
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], -4.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
@@ -2535,8 +2523,7 @@ define amdgpu_kernel void @store_inv_2pi_f64(ptr addrspace(1) %out) {
; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX942-NEXT: s_mov_b32 s3, 0xf000
; GFX942-NEXT: s_mov_b32 s2, -1
-; GFX942-NEXT: v_mov_b32_e32 v0, 0x6dc9c882
-; GFX942-NEXT: v_mov_b32_e32 v1, 0x3fc45f30
+; GFX942-NEXT: v_mov_b64_e32 v[0:1], 0.15915494309189532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX942-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/siloadstoreopt-misaligned-regsequence.ll b/llvm/test/CodeGen/AMDGPU/siloadstoreopt-misaligned-regsequence.ll
index e95aba71775b5..b0575440a76ad 100644
--- a/llvm/test/CodeGen/AMDGPU/siloadstoreopt-misaligned-regsequence.ll
+++ b/llvm/test/CodeGen/AMDGPU/siloadstoreopt-misaligned-regsequence.ll
@@ -5,13 +5,12 @@ define amdgpu_kernel void @foo(ptr %0) {
; CHECK-LABEL: foo:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_mov_b32_e32 v3, v2
-; CHECK-NEXT: v_mov_b32_e32 v4, v3
-; CHECK-NEXT: v_mov_b32_e32 v3, v2
+; CHECK-NEXT: v_mov_b32_e32 v5, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; CHECK-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
+; CHECK-NEXT: v_mov_b64_e32 v[2:3], s[0:1]
+; CHECK-NEXT: flat_store_dwordx3 v[2:3], v[4:6]
; CHECK-NEXT: s_endpgm
entry:
%1 = getelementptr inbounds i8, ptr %0, i64 4