[llvm] [AMDGPU] Elide bitcast fold i64 imm to build_vector (PR #154115)

Janek van Oirschot via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 21 07:38:52 PDT 2025


https://github.com/JanekvO updated https://github.com/llvm/llvm-project/pull/154115

From 116ec2c63548ed67ee3dd4cedfbeed20fa1b14de Mon Sep 17 00:00:00 2001
From: Janek van Oirschot <janek.vanoirschot at amd.com>
Date: Mon, 18 Aug 2025 14:20:10 +0100
Subject: [PATCH 1/2] [AMDGPU] Elide bitcast combine to build_vector when the
 i64 constant can be materialized

---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 11 +++++++++++
 .../AMDGPU/av-split-dead-valno-crash.ll       | 19 ++++++++++++-------
 llvm/test/CodeGen/AMDGPU/flat-scratch.ll      |  6 ++----
 3 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 8ccd8fcc08d38..9df47c2c5ce6b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -5338,6 +5338,13 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
     if (DestVT.getSizeInBits() != 64 || !DestVT.isVector())
       break;
 
+    auto canMov64b = [&](uint64_t Val) -> bool {
+      if (!Subtarget->isGCN())
+        return false;
+      auto &ST = DAG.getSubtarget<GCNSubtarget>();
+      return ST.hasMovB64() && (ST.has64BitLiterals() || isUInt<32>(Val));
+    };
+
     // Fold bitcasts of constants.
     //
     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
@@ -5346,6 +5353,8 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
       SDLoc SL(N);
       uint64_t CVal = C->getZExtValue();
+      if (canMov64b(CVal))
+        break;
       SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
                                DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
@@ -5356,6 +5365,8 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
       const APInt &Val = C->getValueAPF().bitcastToAPInt();
       SDLoc SL(N);
       uint64_t CVal = Val.getZExtValue();
+      if (canMov64b(CVal))
+        break;
       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
diff --git a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
index 89fe0ab526a8a..6c421d50195e6 100644
--- a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
+++ b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
@@ -16,10 +16,12 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
 ; CHECK-NEXT:    s_bitcmp1_b32 s0, 8
 ; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
-; CHECK-NEXT:    s_xor_b64 s[20:21], s[2:3], -1
 ; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 1, v0
-; CHECK-NEXT:    s_and_b64 s[2:3], exec, s[2:3]
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0x9037ab78
+; CHECK-NEXT:    v_accvgpr_write_b32 a3, v1
+; CHECK-NEXT:    s_xor_b64 s[20:21], s[2:3], -1
+; CHECK-NEXT:    s_and_b64 s[2:3], exec, s[2:3]
+; CHECK-NEXT:    v_accvgpr_write_b32 a2, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, 0xbe927e4f
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0x19f4ec90
 ; CHECK-NEXT:    v_mov_b32_e32 v5, 0x3efa01a0
@@ -40,7 +42,8 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
 ; CHECK-NEXT:    v_mov_b32_e32 v18, 0x55555523
 ; CHECK-NEXT:    v_mov_b32_e32 v19, 0xbfd55555
 ; CHECK-NEXT:    s_and_b64 s[6:7], exec, s[18:19]
-; CHECK-NEXT:    v_mov_b32_e32 v20, 0
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b64_e32 v[20:21], 0
 ; CHECK-NEXT:    ; implicit-def: $agpr0_agpr1
 ; CHECK-NEXT:    ; implicit-def: $vgpr22_vgpr23
 ; CHECK-NEXT:    s_branch .LBB0_2
@@ -61,9 +64,11 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
 ; CHECK-NEXT:    ; in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    v_mov_b64_e32 v[24:25], s[14:15]
 ; CHECK-NEXT:    flat_load_dwordx2 v[24:25], v[24:25]
-; CHECK-NEXT:    v_mov_b64_e32 v[26:27], v[0:1]
+; CHECK-NEXT:    v_accvgpr_read_b32 v27, a3
+; CHECK-NEXT:    v_accvgpr_read_b32 v26, a2
 ; CHECK-NEXT:    v_mov_b64_e32 v[28:29], v[2:3]
 ; CHECK-NEXT:    v_mov_b64_e32 v[30:31], v[16:17]
+; CHECK-NEXT:    v_mov_b64_e32 v[20:21], 0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_fmac_f64_e32 v[26:27], 0, v[24:25]
 ; CHECK-NEXT:    v_fmac_f64_e32 v[28:29], 0, v[26:27]
@@ -134,10 +139,11 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
 ; CHECK-NEXT:    v_mov_b32_e32 v27, v26
 ; CHECK-NEXT:    s_and_b64 s[8:9], exec, s[16:17]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v22, v22, 0, s[16:17]
-; CHECK-NEXT:    global_store_dwordx2 v20, v[26:27], s[12:13]
+; CHECK-NEXT:    global_store_dwordx2 v0, v[26:27], s[12:13]
 ; CHECK-NEXT:    s_cselect_b32 s23, s23, 0
 ; CHECK-NEXT:    s_cselect_b32 s22, s22, 0
 ; CHECK-NEXT:    s_mov_b64 s[8:9], -1
+; CHECK-NEXT:    v_mov_b64_e32 v[20:21], 0
 ; CHECK-NEXT:    s_branch .LBB0_14
 ; CHECK-NEXT:  .LBB0_13: ; in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    v_accvgpr_write_b32 a0, v24
@@ -153,9 +159,8 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
 ; CHECK-NEXT:    s_cbranch_vccz .LBB0_1
 ; CHECK-NEXT:  ; %bb.16: ; %._crit_edge2105.i.i.i2330
 ; CHECK-NEXT:    ; in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT:    v_mov_b32_e32 v21, v20
 ; CHECK-NEXT:    s_mov_b64 s[24:25], 0
-; CHECK-NEXT:    global_store_dwordx2 v20, v[20:21], s[12:13]
+; CHECK-NEXT:    global_store_dwordx2 v0, v[20:21], s[12:13]
 ; CHECK-NEXT:    s_branch .LBB0_1
 ; CHECK-NEXT:  .LBB0_17: ; %DummyReturnBlock
 ; CHECK-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index fc8883924dfbc..4eaa1965c66f1 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -4152,8 +4152,7 @@ define void @store_load_i64_aligned(ptr addrspace(5) nocapture %arg) {
 ; GFX942-LABEL: store_load_i64_aligned:
 ; GFX942:       ; %bb.0: ; %bb
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT:    v_mov_b32_e32 v2, 15
-; GFX942-NEXT:    v_mov_b32_e32 v3, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], 15
 ; GFX942-NEXT:    scratch_store_dwordx2 v0, v[2:3], off sc0 sc1
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    scratch_load_dwordx2 v[0:1], v0, off sc0 sc1
@@ -4263,8 +4262,7 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) {
 ; GFX942-LABEL: store_load_i64_unaligned:
 ; GFX942:       ; %bb.0: ; %bb
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT:    v_mov_b32_e32 v2, 15
-; GFX942-NEXT:    v_mov_b32_e32 v3, 0
+; GFX942-NEXT:    v_mov_b64_e32 v[2:3], 15
 ; GFX942-NEXT:    scratch_store_dwordx2 v0, v[2:3], off sc0 sc1
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    scratch_load_dwordx2 v[0:1], v0, off sc0 sc1

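For context, a minimal reproducer in the spirit of the flat-scratch.ll update above (a sketch, not copied from the test file). Compiled along the lines of llc -mtriple=amdgcn -mcpu=gfx942, the i64 immediate 15 fits in 32 bits, so after this patch it is kept as a single 64-bit constant and selects v_mov_b64_e32 v[2:3], 15 rather than two v_mov_b32_e32 moves of its halves:

  ; Sketch only: roughly mirrors the store half of store_load_i64_aligned.
  define void @store_i64_imm_sketch(ptr addrspace(5) %arg) {
  bb:
    store volatile i64 15, ptr addrspace(5) %arg, align 8
    ret void
  }
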
From bcc2b2d7bc3f8d1e13dadc10710e1747f3cc2c85 Mon Sep 17 00:00:00 2001
From: Janek van Oirschot <janek.vanoirschot at amd.com>
Date: Thu, 21 Aug 2025 15:37:43 +0100
Subject: [PATCH 2/2] Address comments + update tests

---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |  18 +--
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h   |   3 +
 llvm/test/CodeGen/AMDGPU/dagcombine-select.ll | 144 ++++++++++++------
 llvm/test/CodeGen/AMDGPU/imm.ll               |   3 +-
 llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll    |  42 ++---
 llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll    |  42 ++---
 6 files changed, 163 insertions(+), 89 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 9df47c2c5ce6b..6638df95b379c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -5296,6 +5296,13 @@ SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
   return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
 }
 
+bool AMDGPUTargetLowering::canMov64bImm(uint64_t Val, SelectionDAG &DAG) const {
+  if (!Subtarget->isGCN())
+    return false;
+  auto &ST = DAG.getSubtarget<GCNSubtarget>();
+  return ST.hasMovB64() && (ST.has64BitLiterals() || isUInt<32>(Val));
+}
+
 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -5338,13 +5345,6 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
     if (DestVT.getSizeInBits() != 64 || !DestVT.isVector())
       break;
 
-    auto canMov64b = [&](uint64_t Val) -> bool {
-      if (!Subtarget->isGCN())
-        return false;
-      auto &ST = DAG.getSubtarget<GCNSubtarget>();
-      return ST.hasMovB64() && (ST.has64BitLiterals() || isUInt<32>(Val));
-    };
-
     // Fold bitcasts of constants.
     //
     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
@@ -5353,7 +5353,7 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
       SDLoc SL(N);
       uint64_t CVal = C->getZExtValue();
-      if (canMov64b(CVal))
+      if (canMov64bImm(CVal, DAG))
         break;
       SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
@@ -5365,7 +5365,7 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
       const APInt &Val = C->getValueAPF().bitcastToAPInt();
       SDLoc SL(N);
       uint64_t CVal = Val.getZExtValue();
-      if (canMov64b(CVal))
+      if (canMov64bImm(CVal, DAG))
         break;
       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index fd5d5b8dec431..f7a43b5bb9baf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -103,6 +103,9 @@ class AMDGPUTargetLowering : public TargetLowering {
   SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
 
 protected:
+  /// Check whether the value \p Val can be materialized with v_mov_b64 on
+  /// the current target.
+  bool canMov64bImm(uint64_t Val, SelectionDAG &DAG) const;
   bool shouldCombineMemoryType(EVT VT) const;
   SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
index c429b1a32bde6..c7eb591b513f1 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
@@ -317,17 +317,31 @@ define amdgpu_kernel void @sel_constants_sub_constant_sel_constants_v4i32(ptr ad
 }
 
 define amdgpu_kernel void @sdiv_constant_sel_constants_i64(ptr addrspace(1) %p, i1 %cond) {
-; GCN-LABEL: sdiv_constant_sel_constants_i64:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dword s2, s[4:5], 0x2c
-; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_bitcmp1_b32 s2, 0
-; GCN-NEXT:    s_cselect_b32 s2, 0, 5
-; GCN-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-NEXT:    global_store_dwordx2 v1, v[0:1], s[0:1]
-; GCN-NEXT:    s_endpgm
+; GFX9-LABEL: sdiv_constant_sel_constants_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s2, s[4:5], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_bitcmp1_b32 s2, 0
+; GFX9-NEXT:    s_cselect_b32 s2, 0, 5
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    global_store_dwordx2 v1, v[0:1], s[0:1]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX942-LABEL: sdiv_constant_sel_constants_i64:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_load_dword s2, s[4:5], 0x2c
+; GFX942-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX942-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942-NEXT:    s_bitcmp1_b32 s2, 0
+; GFX942-NEXT:    s_cselect_b32 s2, 0, 0
+; GFX942-NEXT:    s_cselect_b32 s3, 0, 5
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
+; GFX942-NEXT:    s_endpgm
   %sel = select i1 %cond, i64 121, i64 23
   %bo = sdiv i64 120, %sel
   store i64 %bo, ptr addrspace(1) %p, align 8
@@ -353,17 +367,31 @@ define amdgpu_kernel void @sdiv_constant_sel_constants_i32(ptr addrspace(1) %p,
 }
 
 define amdgpu_kernel void @udiv_constant_sel_constants_i64(ptr addrspace(1) %p, i1 %cond) {
-; GCN-LABEL: udiv_constant_sel_constants_i64:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dword s2, s[4:5], 0x2c
-; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_bitcmp1_b32 s2, 0
-; GCN-NEXT:    s_cselect_b32 s2, 0, 5
-; GCN-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-NEXT:    global_store_dwordx2 v1, v[0:1], s[0:1]
-; GCN-NEXT:    s_endpgm
+; GFX9-LABEL: udiv_constant_sel_constants_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s2, s[4:5], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_bitcmp1_b32 s2, 0
+; GFX9-NEXT:    s_cselect_b32 s2, 0, 5
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    global_store_dwordx2 v1, v[0:1], s[0:1]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX942-LABEL: udiv_constant_sel_constants_i64:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_load_dword s2, s[4:5], 0x2c
+; GFX942-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX942-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942-NEXT:    s_bitcmp1_b32 s2, 0
+; GFX942-NEXT:    s_cselect_b32 s2, 0, 0
+; GFX942-NEXT:    s_cselect_b32 s3, 0, 5
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
+; GFX942-NEXT:    s_endpgm
   %sel = select i1 %cond, i64 -4, i64 23
   %bo = udiv i64 120, %sel
   store i64 %bo, ptr addrspace(1) %p, align 8
@@ -371,17 +399,31 @@ define amdgpu_kernel void @udiv_constant_sel_constants_i64(ptr addrspace(1) %p,
 }
 
 define amdgpu_kernel void @srem_constant_sel_constants(ptr addrspace(1) %p, i1 %cond) {
-; GCN-LABEL: srem_constant_sel_constants:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dword s2, s[4:5], 0x2c
-; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_bitcmp1_b32 s2, 0
-; GCN-NEXT:    s_cselect_b32 s2, 33, 3
-; GCN-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-NEXT:    global_store_dwordx2 v1, v[0:1], s[0:1]
-; GCN-NEXT:    s_endpgm
+; GFX9-LABEL: srem_constant_sel_constants:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s2, s[4:5], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_bitcmp1_b32 s2, 0
+; GFX9-NEXT:    s_cselect_b32 s2, 33, 3
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    global_store_dwordx2 v1, v[0:1], s[0:1]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX942-LABEL: srem_constant_sel_constants:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_load_dword s2, s[4:5], 0x2c
+; GFX942-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX942-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942-NEXT:    s_bitcmp1_b32 s2, 0
+; GFX942-NEXT:    s_cselect_b32 s2, 0, 0
+; GFX942-NEXT:    s_cselect_b32 s3, 33, 3
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
+; GFX942-NEXT:    s_endpgm
   %sel = select i1 %cond, i64 34, i64 15
   %bo = srem i64 33, %sel
   store i64 %bo, ptr addrspace(1) %p, align 8
@@ -389,17 +431,31 @@ define amdgpu_kernel void @srem_constant_sel_constants(ptr addrspace(1) %p, i1 %
 }
 
 define amdgpu_kernel void @urem_constant_sel_constants(ptr addrspace(1) %p, i1 %cond) {
-; GCN-LABEL: urem_constant_sel_constants:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dword s2, s[4:5], 0x2c
-; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_bitcmp1_b32 s2, 0
-; GCN-NEXT:    s_cselect_b32 s2, 33, 3
-; GCN-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-NEXT:    global_store_dwordx2 v1, v[0:1], s[0:1]
-; GCN-NEXT:    s_endpgm
+; GFX9-LABEL: urem_constant_sel_constants:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s2, s[4:5], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_bitcmp1_b32 s2, 0
+; GFX9-NEXT:    s_cselect_b32 s2, 33, 3
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    global_store_dwordx2 v1, v[0:1], s[0:1]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX942-LABEL: urem_constant_sel_constants:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_load_dword s2, s[4:5], 0x2c
+; GFX942-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX942-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942-NEXT:    s_bitcmp1_b32 s2, 0
+; GFX942-NEXT:    s_cselect_b32 s2, 0, 0
+; GFX942-NEXT:    s_cselect_b32 s3, 33, 3
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
+; GFX942-NEXT:    s_endpgm
   %sel = select i1 %cond, i64 34, i64 15
   %bo = urem i64 33, %sel
   store i64 %bo, ptr addrspace(1) %p, align 8
diff --git a/llvm/test/CodeGen/AMDGPU/imm.ll b/llvm/test/CodeGen/AMDGPU/imm.ll
index 21390003ee565..3735796b65a19 100644
--- a/llvm/test/CodeGen/AMDGPU/imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/imm.ll
@@ -2163,10 +2163,9 @@ define amdgpu_kernel void @store_inline_imm_0.0_f64(ptr addrspace(1) %out) {
 ; GFX942-LABEL: store_inline_imm_0.0_f64:
 ; GFX942:       ; %bb.0:
 ; GFX942-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GFX942-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX942-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX942-NEXT:    s_mov_b32 s2, -1
-; GFX942-NEXT:    v_mov_b32_e32 v1, v0
+; GFX942-NEXT:    v_mov_b64_e32 v[0:1], 0
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GFX942-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
index 101787abf8ea7..21dd1db6c714b 100644
--- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
@@ -93,8 +93,10 @@ define amdgpu_kernel void @sint_to_fp_i1_f64(ptr addrspace(1) %out, i32 %in) {
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX942-NEXT:    s_cselect_b32 s2, 0xbff00000, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, s2
-; GFX942-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX942-NEXT:    s_cselect_b32 s3, 0, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
 ; GFX942-NEXT:    s_endpgm
   %cmp = icmp eq i32 %in, 0
   %fp = sitofp i1 %cmp to double
@@ -388,8 +390,10 @@ define amdgpu_kernel void @s_select_sint_to_fp_i1_vals_f64(ptr addrspace(1) %out
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX942-NEXT:    s_cselect_b32 s2, 0xbff00000, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, s2
-; GFX942-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX942-NEXT:    s_cselect_b32 s3, 0, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
 ; GFX942-NEXT:    s_endpgm
   %cmp = icmp eq i32 %in, 0
   %select = select i1 %cmp, double -1.0, double 0.0
@@ -414,10 +418,10 @@ define void @v_select_sint_to_fp_i1_vals_f64(ptr addrspace(1) %out, i32 %in) {
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_mov_b32_e32 v3, 0xbff00000
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX942-NEXT:    s_nop 0
-; GFX942-NEXT:    v_cndmask_b32_e32 v5, 0, v3, vcc
-; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off
+; GFX942-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %in, 0
@@ -469,8 +473,10 @@ define amdgpu_kernel void @s_select_sint_to_fp_i1_vals_i64(ptr addrspace(1) %out
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX942-NEXT:    s_cselect_b32 s2, 0xbff00000, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, s2
-; GFX942-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX942-NEXT:    s_cselect_b32 s3, 0, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
 ; GFX942-NEXT:    s_endpgm
   %cmp = icmp eq i32 %in, 0
   %select = select i1 %cmp, i64 u0xbff0000000000000, i64 0
@@ -495,10 +501,10 @@ define void @v_select_sint_to_fp_i1_vals_i64(ptr addrspace(1) %out, i32 %in) {
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_mov_b32_e32 v3, 0xbff00000
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX942-NEXT:    s_nop 0
-; GFX942-NEXT:    v_cndmask_b32_e32 v5, 0, v3, vcc
-; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off
+; GFX942-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %in, 0
@@ -525,10 +531,10 @@ define void @v_swap_select_sint_to_fp_i1_vals_f64(ptr addrspace(1) %out, i32 %in
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_mov_b32_e32 v3, 0xbff00000
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX942-NEXT:    s_nop 0
-; GFX942-NEXT:    v_cndmask_b32_e64 v5, v3, 0, vcc
-; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off
+; GFX942-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %in, 0
@@ -581,8 +587,10 @@ define amdgpu_kernel void @s_swap_select_sint_to_fp_i1_vals_f64(ptr addrspace(1)
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX942-NEXT:    s_cselect_b32 s2, 0, 0xbff00000
-; GFX942-NEXT:    v_mov_b32_e32 v1, s2
-; GFX942-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX942-NEXT:    s_cselect_b32 s3, 0, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
 ; GFX942-NEXT:    s_endpgm
   %cmp = icmp eq i32 %in, 0
   %select = select i1 %cmp, double 0.0, double -1.0
diff --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
index 983acfc2c0699..816c6d57631df 100644
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
@@ -469,8 +469,10 @@ define amdgpu_kernel void @uint_to_fp_i1_to_f64(ptr addrspace(1) %out, i32 %in)
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX942-NEXT:    s_cselect_b32 s2, 0x3ff00000, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, s2
-; GFX942-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX942-NEXT:    s_cselect_b32 s3, 0, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
 ; GFX942-NEXT:    s_endpgm
   %cmp = icmp eq i32 %in, 0
   %fp = uitofp i1 %cmp to double
@@ -647,8 +649,10 @@ define amdgpu_kernel void @s_select_uint_to_fp_i1_vals_f64(ptr addrspace(1) %out
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX942-NEXT:    s_cselect_b32 s2, 0x3ff00000, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, s2
-; GFX942-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX942-NEXT:    s_cselect_b32 s3, 0, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
 ; GFX942-NEXT:    s_endpgm
   %cmp = icmp eq i32 %in, 0
   %select = select i1 %cmp, double 1.0, double 0.0
@@ -673,10 +677,10 @@ define void @v_select_uint_to_fp_i1_vals_f64(ptr addrspace(1) %out, i32 %in) {
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_mov_b32_e32 v3, 0x3ff00000
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX942-NEXT:    s_nop 0
-; GFX942-NEXT:    v_cndmask_b32_e32 v5, 0, v3, vcc
-; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off
+; GFX942-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %in, 0
@@ -728,8 +732,10 @@ define amdgpu_kernel void @s_select_uint_to_fp_i1_vals_i64(ptr addrspace(1) %out
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX942-NEXT:    s_cselect_b32 s2, 0x3ff00000, 0
-; GFX942-NEXT:    v_mov_b32_e32 v1, s2
-; GFX942-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX942-NEXT:    s_cselect_b32 s3, 0, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
 ; GFX942-NEXT:    s_endpgm
   %cmp = icmp eq i32 %in, 0
   %select = select i1 %cmp, i64 u0x3ff0000000000000, i64 0
@@ -754,10 +760,10 @@ define void @v_select_uint_to_fp_i1_vals_i64(ptr addrspace(1) %out, i32 %in) {
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_mov_b32_e32 v3, 0x3ff00000
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX942-NEXT:    s_nop 0
-; GFX942-NEXT:    v_cndmask_b32_e32 v5, 0, v3, vcc
-; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off
+; GFX942-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %in, 0
@@ -810,8 +816,10 @@ define amdgpu_kernel void @s_swap_select_uint_to_fp_i1_vals_f64(ptr addrspace(1)
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX942-NEXT:    s_cselect_b32 s2, 0, 0x3ff00000
-; GFX942-NEXT:    v_mov_b32_e32 v1, s2
-; GFX942-NEXT:    global_store_dwordx2 v0, v[0:1], s[0:1]
+; GFX942-NEXT:    s_cselect_b32 s3, 0, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, s3
+; GFX942-NEXT:    v_mov_b32_e32 v3, s2
+; GFX942-NEXT:    global_store_dwordx2 v0, v[2:3], s[0:1]
 ; GFX942-NEXT:    s_endpgm
   %cmp = icmp eq i32 %in, 0
   %select = select i1 %cmp, double 0.0, double 1.0
@@ -836,10 +844,10 @@ define void @v_swap_select_uint_to_fp_i1_vals_f64(ptr addrspace(1) %out, i32 %in
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    v_mov_b32_e32 v3, 0x3ff00000
 ; GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GFX942-NEXT:    v_mov_b32_e32 v4, 0
+; GFX942-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX942-NEXT:    s_nop 0
-; GFX942-NEXT:    v_cndmask_b32_e64 v5, v3, 0, vcc
-; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off
+; GFX942-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX942-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
 ; GFX942-NEXT:    s_waitcnt vmcnt(0)
 ; GFX942-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %in, 0

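The canMov64bImm check added in the second patch only accepts an immediate when the subtarget either supports 64-bit literals or the value fits in 32 bits. As a contrasting sketch (assumed, not taken from the patch or its tests), a constant with a nonzero high half still takes the old build_vector path on a target such as gfx942, which is assumed here to report hasMovB64() but not has64BitLiterals(), and is therefore still emitted as two 32-bit moves:

  ; Sketch only: the wide constant fails the isUInt<32> check in canMov64bImm,
  ; so the bitcast is still folded to a v2i32 build_vector of its halves.
  define void @store_i64_wide_imm_sketch(ptr addrspace(5) %arg) {
  bb:
    store volatile i64 u0x123456789abcdef0, ptr addrspace(5) %arg, align 8
    ret void
  }
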

