[llvm] [AMDGPU] Convert 64-bit sra to 32-bit if shift amt >= 32 (PR #144421)

via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 20 07:46:13 PDT 2025


https://github.com/LU-JOHN updated https://github.com/llvm/llvm-project/pull/144421

>From 7d8a0d6717f9492b9142d6d4247bff61c941949c Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 7 May 2025 10:54:12 -0500
Subject: [PATCH 1/8] Use computeKnownBits to simplify vector/scalar sra

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 104 +++++++++++++++---
 1 file changed, 86 insertions(+), 18 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index c51cc2a2fe529..9a50eac799e3a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4151,32 +4151,100 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
 
 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
-  if (N->getValueType(0) != MVT::i64)
+  SDValue RHS = N->getOperand(1);
+  ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
+  EVT VT = N->getValueType(0);
+  SDValue LHS = N->getOperand(0);
+  SelectionDAG &DAG = DCI.DAG;
+  SDLoc SL(N);
+  
+  if (VT.getScalarType() != MVT::i64)
     return SDValue();
 
-  const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
-  if (!RHS)
+  // For C >= 32
+  // i64 (sra x, C) -> (build_pair (sra hi_32(x), C - 32), (sra hi_32(x), 31))
+
+  // On some subtargets, 64-bit shift is a quarter rate instruction. In the
+  // common case, splitting this into a move and a 32-bit shift is faster and
+  // the same code size.
+  KnownBits Known = DAG.computeKnownBits(RHS);
+
+  EVT ElementType = VT.getScalarType();
+  EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
+  EVT TargetType = VT.isVector() ? VT.changeVectorElementType(TargetScalarType)
+                                 : TargetScalarType;
+
+  if (Known.getMinValue().getZExtValue() < TargetScalarType.getSizeInBits())
     return SDValue();
 
-  SelectionDAG &DAG = DCI.DAG;
-  SDLoc SL(N);
-  unsigned RHSVal = RHS->getZExtValue();
+  SDValue ShiftFullAmt =
+      DAG.getConstant(TargetScalarType.getSizeInBits() - 1, SL, TargetType);
+  SDValue ShiftAmt;
+  if (CRHS) {
+    unsigned RHSVal = CRHS->getZExtValue();
 
-  // For C >= 32
-  // (sra i64:x, C) -> build_pair (sra hi_32(x), C - 32), (sra hi_32(x), 31)
-  if (RHSVal >= 32) {
-    SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
-    Hi = DAG.getFreeze(Hi);
-    SDValue HiShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
-                                  DAG.getConstant(31, SL, MVT::i32));
-    SDValue LoShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
-                                  DAG.getConstant(RHSVal - 32, SL, MVT::i32));
+    ShiftAmt = DAG.getConstant(RHSVal - TargetScalarType.getSizeInBits(), SL,
+                               TargetType);
+  } else {
+    SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
+    const SDValue ShiftMask =
+        DAG.getConstant(TargetScalarType.getSizeInBits() - 1, SL, TargetType);
+    // This AND instruction will clamp out of bounds shift values.
+    // It will also be removed during later instruction selection.
+    ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, truncShiftAmt, ShiftMask);
+  }
 
-    SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {LoShift, HiShift});
-    return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
+  EVT ConcatType;
+  SDValue Hi;
+  SDLoc LHSSL(LHS);
+  // Bitcast LHS into ConcatType so hi-half of source can be extracted into Hi
+  if (VT.isVector()) {
+    unsigned NElts = TargetType.getVectorNumElements();
+    ConcatType = TargetType.getDoubleNumVectorElementsVT(*DAG.getContext());
+    SDValue SplitLHS = DAG.getNode(ISD::BITCAST, LHSSL, ConcatType, LHS);
+    SmallVector<SDValue, 8> HiOps(NElts);
+    SmallVector<SDValue, 16> HiAndLoOps;
+
+    DAG.ExtractVectorElements(SplitLHS, HiAndLoOps, 0, NElts * 2);
+    for (unsigned I = 0; I != NElts; ++I) {
+      HiOps[I] = HiAndLoOps[2 * I + 1];
+    }
+    Hi = DAG.getNode(ISD::BUILD_VECTOR, LHSSL, TargetType, HiOps);
+  } else {
+    const SDValue One = DAG.getConstant(1, LHSSL, TargetScalarType);
+    ConcatType = EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
+    SDValue SplitLHS = DAG.getNode(ISD::BITCAST, LHSSL, ConcatType, LHS);
+    Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, LHSSL, TargetType, SplitLHS, One);
   }
 
-  return SDValue();
+  SDValue HiShift = DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftFullAmt);
+  SDValue NewShift = DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftAmt);
+
+  SDValue Vec;
+  if (VT.isVector()) {
+    unsigned NElts = TargetType.getVectorNumElements();
+    SmallVector<SDValue, 8> HiOps;
+    SmallVector<SDValue, 8> LoOps;
+    SmallVector<SDValue, 16> HiAndLoOps(NElts * 2);
+
+    DAG.ExtractVectorElements(HiShift, HiOps, 0, NElts);
+    DAG.ExtractVectorElements(NewShift, LoOps, 0, NElts);
+    for (unsigned I = 0; I != NElts; ++I) {
+      HiAndLoOps[2 * I + 1] = HiOps[I];
+      if (Known.getMinValue().getZExtValue() ==
+          (ElementType.getSizeInBits() - 1))
+        HiAndLoOps[2 * I] = HiOps[I];
+      else
+        HiAndLoOps[2 * I] = LoOps[I];
+    }
+    Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, ConcatType, HiAndLoOps);
+  } else {
+    if (Known.getMinValue().getZExtValue() == (ElementType.getSizeInBits() - 1))
+      Vec = DAG.getBuildVector(ConcatType, SL, {HiShift, HiShift});
+    else
+      Vec = DAG.getBuildVector(ConcatType, SL, {NewShift, HiShift});
+  }
+  return DAG.getNode(ISD::BITCAST, SL, VT, Vec);
 }
 
 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
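
For reference, a minimal IR sketch of the pattern this combine targets (function name and metadata are illustrative, modeled on the ashr64_reduce.ll tests updated below): when the !range metadata proves the shift amount is at least 32, the i64 ashr reduces to 32-bit shifts of the high half.

  define i64 @ashr_known_ge_32(i64 %arg0, ptr %amt.ptr) {
    %amt = load i64, ptr %amt.ptr, !range !0, !noundef !{}
    ; With !0 = [32, 64), this lowers to a 32-bit shift of hi_32(%arg0) plus a sign fill.
    %ashr = ashr i64 %arg0, %amt
    ret i64 %ashr
  }
  !0 = !{i64 32, i64 64}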

>From de9ed43a0508f6a209b16d8d719036f2bea4453a Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 6 Jun 2025 17:28:03 -0500
Subject: [PATCH 2/8] Update tests

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll     | 109 ++++++------
 llvm/test/CodeGen/AMDGPU/fptoi.i128.ll        |  28 +---
 llvm/test/CodeGen/AMDGPU/load-constant-i16.ll |  16 +-
 llvm/test/CodeGen/AMDGPU/load-global-i16.ll   |   4 +-
 llvm/test/CodeGen/AMDGPU/mul_int24.ll         |  74 ++++-----
 llvm/test/CodeGen/AMDGPU/sra.ll               | 157 +++++++++---------
 6 files changed, 196 insertions(+), 192 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll b/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll
index 0718a17dfd06d..e24d74f03a2ba 100644
--- a/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll
@@ -17,9 +17,11 @@ define i64 @ashr_metadata(i64 %arg0, ptr %arg1.ptr) {
 ; CHECK-LABEL: ashr_metadata:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dword v2, v[2:3]
+; CHECK-NEXT:    flat_load_dword v0, v[2:3]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v0, v1
+; CHECK-NEXT:    v_mov_b32_e32 v1, v2
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load i64, ptr %arg1.ptr, !range !0, !noundef !{}
   %ashr = ashr i64 %arg0, %shift.amt
@@ -29,9 +31,11 @@ define i64 @ashr_metadata(i64 %arg0, ptr %arg1.ptr) {
 define amdgpu_ps i64 @ashr_metadata_sgpr_return(i64 inreg %arg0, ptr addrspace(1) inreg %arg1.ptr) {
 ; CHECK-LABEL: ashr_metadata_sgpr_return:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_load_dword s2, s[2:3], 0x0
+; CHECK-NEXT:    s_load_dword s0, s[2:3], 0x0
+; CHECK-NEXT:    s_ashr_i32 s2, s1, 31
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    s_ashr_i64 s[0:1], s[0:1], s2
+; CHECK-NEXT:    s_ashr_i32 s0, s1, s0
+; CHECK-NEXT:    s_mov_b32 s1, s2
 ; CHECK-NEXT:    ; return to shader part epilog
   %shift.amt = load i64, ptr addrspace(1) %arg1.ptr, !range !0, !noundef !{}
   %ashr = ashr i64 %arg0, %shift.amt
@@ -43,9 +47,11 @@ define i64 @ashr_exact_metadata(i64 %arg0, ptr %arg1.ptr) {
 ; CHECK-LABEL: ashr_exact_metadata:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dword v2, v[2:3]
+; CHECK-NEXT:    flat_load_dword v0, v[2:3]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v0, v1
+; CHECK-NEXT:    v_mov_b32_e32 v1, v2
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load i64, ptr %arg1.ptr, !range !0, !noundef !{}
   %ashr = ashr exact i64 %arg0, %shift.amt
@@ -56,9 +62,11 @@ define i64 @ashr_metadata_two_ranges(i64 %arg0, ptr %arg1.ptr) {
 ; CHECK-LABEL: ashr_metadata_two_ranges:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dword v2, v[2:3]
+; CHECK-NEXT:    flat_load_dword v0, v[2:3]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v0, v1
+; CHECK-NEXT:    v_mov_b32_e32 v1, v2
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load i64, ptr %arg1.ptr, !range !1, !noundef !{}
   %ashr = ashr i64 %arg0, %shift.amt
@@ -333,8 +341,8 @@ define i64 @ashr_or32(i64 %arg0, i64 %shift_amt) {
 ; CHECK-LABEL: ashr_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v2, 32, v2
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v2, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v1, 31, v1
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %or = or i64 %shift_amt, 32
   %ashr = ashr i64 %arg0, %or
@@ -345,10 +353,10 @@ define <2 x i64> @ashr_v2_or32(<2 x i64> %arg0, <2 x i64> %shift_amt) {
 ; CHECK-LABEL: ashr_v2_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v5, 32, v6
-; CHECK-NEXT:    v_or_b32_e32 v4, 32, v4
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v4, v[0:1]
-; CHECK-NEXT:    v_ashrrev_i64 v[2:3], v5, v[2:3]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v4, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v1, 31, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, v6, v3
+; CHECK-NEXT:    v_ashrrev_i32_e32 v3, 31, v3
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %or = or <2 x i64> %shift_amt, splat (i64 32)
   %ashr = ashr <2 x i64> %arg0, %or
@@ -359,12 +367,12 @@ define <3 x i64> @ashr_v3_or32(<3 x i64> %arg0, <3 x i64> %shift_amt) {
 ; CHECK-LABEL: ashr_v3_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v7, 32, v10
-; CHECK-NEXT:    v_or_b32_e32 v8, 32, v8
-; CHECK-NEXT:    v_or_b32_e32 v6, 32, v6
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v6, v[0:1]
-; CHECK-NEXT:    v_ashrrev_i64 v[2:3], v8, v[2:3]
-; CHECK-NEXT:    v_ashrrev_i64 v[4:5], v7, v[4:5]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v6, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v1, 31, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, v8, v3
+; CHECK-NEXT:    v_ashrrev_i32_e32 v3, 31, v3
+; CHECK-NEXT:    v_ashrrev_i32_e32 v4, v10, v5
+; CHECK-NEXT:    v_ashrrev_i32_e32 v5, 31, v5
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %or = or <3 x i64> %shift_amt, splat (i64 32)
   %ashr = ashr <3 x i64> %arg0, %or
@@ -375,14 +383,14 @@ define <4 x i64> @ashr_v4_or32(<4 x i64> %arg0, <4 x i64> %shift_amt) {
 ; CHECK-LABEL: ashr_v4_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v9, 32, v14
-; CHECK-NEXT:    v_or_b32_e32 v11, 32, v12
-; CHECK-NEXT:    v_or_b32_e32 v10, 32, v10
-; CHECK-NEXT:    v_or_b32_e32 v8, 32, v8
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v8, v[0:1]
-; CHECK-NEXT:    v_ashrrev_i64 v[2:3], v10, v[2:3]
-; CHECK-NEXT:    v_ashrrev_i64 v[4:5], v11, v[4:5]
-; CHECK-NEXT:    v_ashrrev_i64 v[6:7], v9, v[6:7]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v8, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v1, 31, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, v10, v3
+; CHECK-NEXT:    v_ashrrev_i32_e32 v3, 31, v3
+; CHECK-NEXT:    v_ashrrev_i32_e32 v4, v12, v5
+; CHECK-NEXT:    v_ashrrev_i32_e32 v5, 31, v5
+; CHECK-NEXT:    v_ashrrev_i32_e32 v6, v14, v7
+; CHECK-NEXT:    v_ashrrev_i32_e32 v7, 31, v7
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %or = or <4 x i64> %shift_amt, splat (i64 32)
   %ashr = ashr <4 x i64> %arg0, %or
@@ -395,8 +403,8 @@ define i64 @ashr_or32_sgpr(i64 inreg %arg0, i64 inreg %shift_amt) {
 ; CHECK-LABEL: ashr_or32_sgpr:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_or_b32 s4, s18, 32
-; CHECK-NEXT:    s_ashr_i64 s[4:5], s[16:17], s4
+; CHECK-NEXT:    s_ashr_i32 s4, s17, s18
+; CHECK-NEXT:    s_ashr_i32 s5, s17, 31
 ; CHECK-NEXT:    v_mov_b32_e32 v0, s4
 ; CHECK-NEXT:    v_mov_b32_e32 v1, s5
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
@@ -408,8 +416,8 @@ define i64 @ashr_or32_sgpr(i64 inreg %arg0, i64 inreg %shift_amt) {
 define amdgpu_ps i64 @ashr_or32_sgpr_return(i64 inreg %arg0, i64 inreg %shift_amt) {
 ; CHECK-LABEL: ashr_or32_sgpr_return:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_or_b32 s2, s2, 32
-; CHECK-NEXT:    s_ashr_i64 s[0:1], s[0:1], s2
+; CHECK-NEXT:    s_ashr_i32 s0, s1, s2
+; CHECK-NEXT:    s_ashr_i32 s1, s1, 31
 ; CHECK-NEXT:    ; return to shader part epilog
   %or = or i64 %shift_amt, 32
   %ashr = ashr i64 %arg0, %or
@@ -420,10 +428,10 @@ define <2 x i64> @ashr_v2_or32_sgpr(<2 x i64> inreg %arg0, <2 x i64> inreg %shif
 ; CHECK-LABEL: ashr_v2_or32_sgpr:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_or_b32 s6, s22, 32
-; CHECK-NEXT:    s_or_b32 s4, s20, 32
-; CHECK-NEXT:    s_ashr_i64 s[4:5], s[16:17], s4
-; CHECK-NEXT:    s_ashr_i64 s[6:7], s[18:19], s6
+; CHECK-NEXT:    s_ashr_i32 s4, s17, s20
+; CHECK-NEXT:    s_ashr_i32 s5, s17, 31
+; CHECK-NEXT:    s_ashr_i32 s6, s19, s22
+; CHECK-NEXT:    s_ashr_i32 s7, s19, 31
 ; CHECK-NEXT:    v_mov_b32_e32 v0, s4
 ; CHECK-NEXT:    v_mov_b32_e32 v1, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s6
@@ -438,12 +446,12 @@ define <3 x i64> @ashr_v3_or32_sgpr(<3 x i64> inreg %arg0, <3 x i64> inreg %shif
 ; CHECK-LABEL: ashr_v3_or32_sgpr:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_or_b32 s8, s26, 32
-; CHECK-NEXT:    s_or_b32 s6, s24, 32
-; CHECK-NEXT:    s_or_b32 s4, s22, 32
-; CHECK-NEXT:    s_ashr_i64 s[4:5], s[16:17], s4
-; CHECK-NEXT:    s_ashr_i64 s[6:7], s[18:19], s6
-; CHECK-NEXT:    s_ashr_i64 s[8:9], s[20:21], s8
+; CHECK-NEXT:    s_ashr_i32 s4, s17, s22
+; CHECK-NEXT:    s_ashr_i32 s5, s17, 31
+; CHECK-NEXT:    s_ashr_i32 s6, s19, s24
+; CHECK-NEXT:    s_ashr_i32 s7, s19, 31
+; CHECK-NEXT:    s_ashr_i32 s8, s21, s26
+; CHECK-NEXT:    s_ashr_i32 s9, s21, 31
 ; CHECK-NEXT:    v_mov_b32_e32 v0, s4
 ; CHECK-NEXT:    v_mov_b32_e32 v1, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s6
@@ -460,20 +468,21 @@ define <4 x i64> @ashr_v4_or32_sgpr(<4 x i64> inreg %arg0, <4 x i64> inreg %shif
 ; CHECK-LABEL: ashr_v4_or32_sgpr:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v0, 32, v0
-; CHECK-NEXT:    s_or_b32 s8, s28, 32
-; CHECK-NEXT:    s_or_b32 s6, s26, 32
-; CHECK-NEXT:    s_or_b32 s4, s24, 32
-; CHECK-NEXT:    s_ashr_i64 s[4:5], s[16:17], s4
-; CHECK-NEXT:    s_ashr_i64 s[6:7], s[18:19], s6
-; CHECK-NEXT:    s_ashr_i64 s[8:9], s[20:21], s8
-; CHECK-NEXT:    v_ashrrev_i64 v[6:7], v0, s[22:23]
+; CHECK-NEXT:    s_ashr_i32 s4, s17, s24
+; CHECK-NEXT:    s_ashr_i32 s5, s17, 31
+; CHECK-NEXT:    s_ashr_i32 s6, s19, s26
+; CHECK-NEXT:    s_ashr_i32 s7, s19, 31
+; CHECK-NEXT:    s_ashr_i32 s8, s21, s28
+; CHECK-NEXT:    s_ashr_i32 s9, s21, 31
+; CHECK-NEXT:    s_ashr_i32 s10, s23, 31
+; CHECK-NEXT:    v_ashrrev_i32_e64 v6, v0, s23
 ; CHECK-NEXT:    v_mov_b32_e32 v0, s4
 ; CHECK-NEXT:    v_mov_b32_e32 v1, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s6
 ; CHECK-NEXT:    v_mov_b32_e32 v3, s7
 ; CHECK-NEXT:    v_mov_b32_e32 v4, s8
 ; CHECK-NEXT:    v_mov_b32_e32 v5, s9
+; CHECK-NEXT:    v_mov_b32_e32 v7, s10
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %or = or <4 x i64> %shift_amt, splat (i64 32)
   %ashr = ashr <4 x i64> %arg0, %or
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index 189b897793381..3465c782bd700 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -1433,25 +1433,15 @@ define i128 @fptoui_f32_to_i128(float %x) {
 }
 
 define i128 @fptosi_f16_to_i128(half %x) {
-; SDAG-LABEL: fptosi_f16_to_i128:
-; SDAG:       ; %bb.0:
-; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; SDAG-NEXT:    v_cvt_i32_f32_e32 v0, v0
-; SDAG-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; SDAG-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; SDAG-NEXT:    v_mov_b32_e32 v3, v2
-; SDAG-NEXT:    s_setpc_b64 s[30:31]
-;
-; GISEL-LABEL: fptosi_f16_to_i128:
-; GISEL:       ; %bb.0:
-; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; GISEL-NEXT:    v_cvt_i32_f32_e32 v0, v0
-; GISEL-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GISEL-NEXT:    v_mov_b32_e32 v2, v1
-; GISEL-NEXT:    v_mov_b32_e32 v3, v1
-; GISEL-NEXT:    s_setpc_b64 s[30:31]
+; GCN-LABEL: fptosi_f16_to_i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v0, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT:    v_mov_b32_e32 v2, v1
+; GCN-NEXT:    v_mov_b32_e32 v3, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cvt = fptosi half %x to i128
   ret i128 %cvt
 }
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
index 8e312a0e195ff..ef4e475c85f26 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
@@ -1643,15 +1643,15 @@ define amdgpu_kernel void @constant_sextload_v4i16_to_v4i32(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    s_load_dwordx2 s[4:5], s[2:3], 0x0
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s6, s4, 16
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s7, s5, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s6, s5, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s7, s4, 16
 ; GCN-NOHSA-SI-NEXT:    s_sext_i32_i16 s5, s5
 ; GCN-NOHSA-SI-NEXT:    s_sext_i32_i16 s4, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s2, -1
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s6
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s5
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s7
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s6
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
@@ -1666,14 +1666,14 @@ define amdgpu_kernel void @constant_sextload_v4i16_to_v4i32(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s1
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-HSA-NEXT:    s_ashr_i32 s0, s2, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s1, s3, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s0, s3, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s1, s2, 16
 ; GCN-HSA-NEXT:    s_sext_i32_i16 s3, s3
 ; GCN-HSA-NEXT:    s_sext_i32_i16 s2, s2
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
index 3753737d251e4..43249dc60ae16 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -1738,8 +1738,8 @@ define amdgpu_kernel void @global_sextload_v4i16_to_v4i32(ptr addrspace(1) %out,
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s4, s0
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s5, s1
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v1, 16, v4
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v3, 16, v5
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v1, 16, v4
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v2, v5, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v0, v4, 0, 16
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
@@ -1758,8 +1758,8 @@ define amdgpu_kernel void @global_sextload_v4i16_to_v4i32(ptr addrspace(1) %out,
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s1
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v1, 16, v4
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v3, 16, v5
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v1, 16, v4
 ; GCN-HSA-NEXT:    v_bfe_i32 v2, v5, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v0, v4, 0, 16
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[6:7], v[0:3]
diff --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
index 4377e7569747a..935a4aba385b2 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
@@ -463,41 +463,39 @@ define amdgpu_kernel void @test_smul24_i64_square(ptr addrspace(1) %out, i32 %a,
 define amdgpu_kernel void @test_smul24_i33(ptr addrspace(1) %out, i33 %a, i33 %b) #0 {
 ; SI-LABEL: test_smul24_i33:
 ; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0xd
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT:    s_load_dword s6, s[4:5], 0xd
+; SI-NEXT:    s_load_dword s4, s[4:5], 0xb
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s4, s0
-; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    s_bfe_i32 s0, s8, 0x180000
-; SI-NEXT:    s_bfe_i32 s1, s2, 0x180000
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    s_mul_i32 s0, s1, s0
-; SI-NEXT:    v_mul_hi_i32_i24_e32 v1, s1, v0
-; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    s_bfe_i32 s5, s6, 0x180000
+; SI-NEXT:    s_bfe_i32 s4, s4, 0x180000
+; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    s_mul_i32 s5, s4, s5
+; SI-NEXT:    v_mul_hi_i32_i24_e32 v1, s4, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s5
 ; SI-NEXT:    v_lshl_b64 v[0:1], v[0:1], 31
 ; SI-NEXT:    v_ashr_i64 v[0:1], v[0:1], 31
-; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_smul24_i33:
 ; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x2c
+; VI-NEXT:    s_load_dword s3, s[4:5], 0x34
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_bfe_i32 s2, s2, 0x180000
-; VI-NEXT:    s_bfe_i32 s3, s4, 0x180000
+; VI-NEXT:    s_bfe_i32 s3, s3, 0x180000
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
 ; VI-NEXT:    v_mul_hi_i32_i24_e32 v1, s2, v0
 ; VI-NEXT:    v_mul_i32_i24_e32 v0, s2, v0
 ; VI-NEXT:    v_lshlrev_b64 v[0:1], 31, v[0:1]
-; VI-NEXT:    s_mov_b32 s4, s0
+; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    v_ashrrev_i64 v[0:1], 31, v[0:1]
-; VI-NEXT:    s_mov_b32 s5, s1
-; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: test_smul24_i33:
@@ -576,32 +574,30 @@ entry:
 define amdgpu_kernel void @test_smulhi24_i33(ptr addrspace(1) %out, i33 %a, i33 %b) {
 ; SI-LABEL: test_smulhi24_i33:
 ; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0xd
-; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_load_dword s6, s[4:5], 0xd
+; SI-NEXT:    s_load_dword s7, s[4:5], 0xb
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s4, s0
-; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    v_mul_hi_i32_i24_e32 v0, s2, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mul_hi_i32_i24_e32 v0, s7, v0
 ; SI-NEXT:    v_and_b32_e32 v0, 1, v0
-; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_smulhi24_i33:
 ; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x34
-; VI-NEXT:    s_mov_b32 s7, 0xf000
-; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x34
+; VI-NEXT:    s_load_dword s7, s[4:5], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_mov_b32 s4, s0
-; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    v_mul_hi_i32_i24_e32 v0, s2, v0
-; VI-NEXT:    s_mov_b32 s5, s1
+; VI-NEXT:    v_mov_b32_e32 v0, s6
+; VI-NEXT:    v_mul_hi_i32_i24_e32 v0, s7, v0
 ; VI-NEXT:    v_and_b32_e32 v0, 1, v0
-; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: test_smulhi24_i33:
diff --git a/llvm/test/CodeGen/AMDGPU/sra.ll b/llvm/test/CodeGen/AMDGPU/sra.ll
index 0b49b9c815da5..975d1a00ae9bd 100644
--- a/llvm/test/CodeGen/AMDGPU/sra.ll
+++ b/llvm/test/CodeGen/AMDGPU/sra.ll
@@ -791,16 +791,16 @@ define amdgpu_kernel void @v_ashr_32_i64(ptr addrspace(1) %out, ptr addrspace(1)
 define amdgpu_kernel void @s_ashr_33_i64(ptr addrspace(1) %out, [8 x i32], i64 %a, [8 x i32], i64 %b) {
 ; SI-LABEL: s_ashr_33_i64:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x13
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x14
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x1d
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_ashr_i32 s6, s7, 31
-; SI-NEXT:    s_ashr_i32 s7, s7, 1
-; SI-NEXT:    s_add_u32 s4, s7, s4
-; SI-NEXT:    s_addc_u32 s5, s6, s5
+; SI-NEXT:    s_ashr_i32 s7, s6, 31
+; SI-NEXT:    s_ashr_i32 s6, s6, 1
+; SI-NEXT:    s_add_u32 s4, s6, s4
+; SI-NEXT:    s_addc_u32 s5, s7, s5
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
 ; SI-NEXT:    v_mov_b32_e32 v1, s5
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -808,16 +808,16 @@ define amdgpu_kernel void @s_ashr_33_i64(ptr addrspace(1) %out, [8 x i32], i64 %
 ;
 ; VI-LABEL: s_ashr_33_i64:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x4c
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x50
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x74
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_ashr_i32 s6, s7, 31
-; VI-NEXT:    s_ashr_i32 s7, s7, 1
-; VI-NEXT:    s_add_u32 s4, s7, s4
-; VI-NEXT:    s_addc_u32 s5, s6, s5
+; VI-NEXT:    s_ashr_i32 s7, s6, 31
+; VI-NEXT:    s_ashr_i32 s6, s6, 1
+; VI-NEXT:    s_add_u32 s4, s6, s4
+; VI-NEXT:    s_addc_u32 s5, s7, s5
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -854,13 +854,14 @@ define amdgpu_kernel void @v_ashr_33_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
-; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
-; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
+; SI-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 offset:4
+; SI-NEXT:    s_mov_b64 s[4:5], s[0:1]
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; SI-NEXT:    v_ashrrev_i32_e32 v3, 1, v3
-; SI-NEXT:    buffer_store_dwordx2 v[3:4], v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; SI-NEXT:    v_ashrrev_i32_e32 v2, 1, v2
+; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_ashr_33_i64:
@@ -868,17 +869,19 @@ define amdgpu_kernel void @v_ashr_33_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 4, v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s1
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
-; VI-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
-; VI-NEXT:    v_ashrrev_i32_e32 v4, 1, v1
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
+; VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v4
+; VI-NEXT:    v_ashrrev_i32_e32 v2, 1, v4
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
 ;
 ; EG-LABEL: v_ashr_33_i64:
@@ -914,16 +917,16 @@ define amdgpu_kernel void @v_ashr_33_i64(ptr addrspace(1) %out, ptr addrspace(1)
 define amdgpu_kernel void @s_ashr_62_i64(ptr addrspace(1) %out, [8 x i32], i64 %a, [8 x i32], i64 %b) {
 ; SI-LABEL: s_ashr_62_i64:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x13
+; SI-NEXT:    s_load_dword s6, s[4:5], 0x14
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x1d
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_ashr_i32 s6, s7, 31
-; SI-NEXT:    s_ashr_i32 s7, s7, 30
-; SI-NEXT:    s_add_u32 s4, s7, s4
-; SI-NEXT:    s_addc_u32 s5, s6, s5
+; SI-NEXT:    s_ashr_i32 s7, s6, 31
+; SI-NEXT:    s_ashr_i32 s6, s6, 30
+; SI-NEXT:    s_add_u32 s4, s6, s4
+; SI-NEXT:    s_addc_u32 s5, s7, s5
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
 ; SI-NEXT:    v_mov_b32_e32 v1, s5
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -931,16 +934,16 @@ define amdgpu_kernel void @s_ashr_62_i64(ptr addrspace(1) %out, [8 x i32], i64 %
 ;
 ; VI-LABEL: s_ashr_62_i64:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x4c
+; VI-NEXT:    s_load_dword s6, s[4:5], 0x50
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x74
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_ashr_i32 s6, s7, 31
-; VI-NEXT:    s_ashr_i32 s7, s7, 30
-; VI-NEXT:    s_add_u32 s4, s7, s4
-; VI-NEXT:    s_addc_u32 s5, s6, s5
+; VI-NEXT:    s_ashr_i32 s7, s6, 31
+; VI-NEXT:    s_ashr_i32 s6, s6, 30
+; VI-NEXT:    s_add_u32 s4, s6, s4
+; VI-NEXT:    s_addc_u32 s5, s7, s5
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -977,13 +980,14 @@ define amdgpu_kernel void @v_ashr_62_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
-; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
-; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
+; SI-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 offset:4
+; SI-NEXT:    s_mov_b64 s[4:5], s[0:1]
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; SI-NEXT:    v_ashrrev_i32_e32 v3, 30, v3
-; SI-NEXT:    buffer_store_dwordx2 v[3:4], v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; SI-NEXT:    v_ashrrev_i32_e32 v2, 30, v2
+; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_ashr_62_i64:
@@ -991,17 +995,19 @@ define amdgpu_kernel void @v_ashr_62_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 4, v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s1
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
-; VI-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
-; VI-NEXT:    v_ashrrev_i32_e32 v4, 30, v1
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
+; VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v4
+; VI-NEXT:    v_ashrrev_i32_e32 v2, 30, v4
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
 ;
 ; EG-LABEL: v_ashr_62_i64:
@@ -1038,15 +1044,15 @@ define amdgpu_kernel void @v_ashr_62_i64(ptr addrspace(1) %out, ptr addrspace(1)
 define amdgpu_kernel void @s_ashr_63_i64(ptr addrspace(1) %out, [8 x i32], i64 %a, [8 x i32], i64 %b) {
 ; SI-LABEL: s_ashr_63_i64:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x13
-; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x1d
+; SI-NEXT:    s_load_dword s8, s[4:5], 0x14
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x1d
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_ashr_i32 s5, s7, 31
-; SI-NEXT:    s_add_u32 s4, s5, s8
-; SI-NEXT:    s_addc_u32 s5, s5, s9
+; SI-NEXT:    s_ashr_i32 s5, s8, 31
+; SI-NEXT:    s_add_u32 s4, s5, s6
+; SI-NEXT:    s_addc_u32 s5, s5, s7
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
 ; SI-NEXT:    v_mov_b32_e32 v1, s5
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -1054,15 +1060,15 @@ define amdgpu_kernel void @s_ashr_63_i64(ptr addrspace(1) %out, [8 x i32], i64 %
 ;
 ; VI-LABEL: s_ashr_63_i64:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x4c
-; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x74
+; VI-NEXT:    s_load_dword s8, s[4:5], 0x50
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x74
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_ashr_i32 s5, s7, 31
-; VI-NEXT:    s_add_u32 s4, s5, s8
-; VI-NEXT:    s_addc_u32 s5, s5, s9
+; VI-NEXT:    s_ashr_i32 s5, s8, 31
+; VI-NEXT:    s_add_u32 s4, s5, s6
+; VI-NEXT:    s_addc_u32 s5, s5, s7
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -1098,13 +1104,14 @@ define amdgpu_kernel void @v_ashr_63_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
-; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
-; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
+; SI-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 offset:4
+; SI-NEXT:    s_mov_b64 s[4:5], s[0:1]
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_ashrrev_i32_e32 v2, 31, v3
+; SI-NEXT:    v_ashrrev_i32_e32 v2, 31, v2
 ; SI-NEXT:    v_mov_b32_e32 v3, v2
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_ashr_63_i64:
@@ -1112,17 +1119,19 @@ define amdgpu_kernel void @v_ashr_63_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 4, v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s1
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
-; VI-NEXT:    v_ashrrev_i32_e32 v0, 31, v1
-; VI-NEXT:    v_mov_b32_e32 v1, v0
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    v_ashrrev_i32_e32 v2, 31, v3
+; VI-NEXT:    v_mov_b32_e32 v3, v2
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
 ;
 ; EG-LABEL: v_ashr_63_i64:

>From 24fe2d9c7c225585407b4af0c117b60473666501 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 6 Jun 2025 17:53:28 -0500
Subject: [PATCH 3/8] Perform sra combine before type legalization

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 11 ++--
 llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll     | 56 +++++++++++++------
 2 files changed, 43 insertions(+), 24 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 9a50eac799e3a..f105523edd405 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -5333,10 +5333,11 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
     break;
   }
   case ISD::SHL:
+  case ISD::SRA:
   case ISD::SRL: {
     // Range metadata can be invalidated when loads are converted to legal types
     // (e.g. v2i64 -> v4i32).
-    // Try to convert vector shl/srl before type legalization so that range
+    // Try to convert vector shl/sra/srl before type legalization so that range
     // metadata can be utilized.
     if (!(N->getValueType(0).isVector() &&
           DCI.getDAGCombineLevel() == BeforeLegalizeTypes) &&
@@ -5344,14 +5345,10 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
       break;
     if (N->getOpcode() == ISD::SHL)
       return performShlCombine(N, DCI);
+    if (N->getOpcode() == ISD::SRA)
+      return performSraCombine(N, DCI);
     return performSrlCombine(N, DCI);
   }
-  case ISD::SRA: {
-    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
-      break;
-
-    return performSraCombine(N, DCI);
-  }
   case ISD::TRUNCATE:
     return performTruncateCombine(N, DCI);
   case ISD::MUL:
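
The comment above is why the SRA case moves into this block; a reduced sketch of the vector situation it protects (names illustrative, mirroring ashr_v2_metadata in the test update below): once the <2 x i64> load is legalized into 32-bit pieces, the !range bound on the shift amounts is gone, so the combine has to run while the metadata is still attached.

  define <2 x i64> @ashr_v2_known_ge_32(<2 x i64> %arg0, ptr %amt.ptr) {
    ; The !range applies per element, so every lane's shift amount is known to be >= 32.
    %amt = load <2 x i64>, ptr %amt.ptr, !range !0, !noundef !{}
    %ashr = ashr <2 x i64> %arg0, %amt
    ret <2 x i64> %ashr
  }
  !0 = !{i64 32, i64 64}
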
diff --git a/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll b/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll
index e24d74f03a2ba..ba7c054d24308 100644
--- a/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll
@@ -108,10 +108,14 @@ define <2 x i64> @ashr_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
 ; CHECK-LABEL: ashr_v2_metadata:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT:    flat_load_dwordx4 v[6:9], v[4:5]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v4, v[0:1]
-; CHECK-NEXT:    v_ashrrev_i64 v[2:3], v6, v[2:3]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v6, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, v8, v3
+; CHECK-NEXT:    v_mov_b32_e32 v1, v5
+; CHECK-NEXT:    v_mov_b32_e32 v3, v4
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
   %ashr = ashr <2 x i64> %arg0, %shift.amt
@@ -123,10 +127,14 @@ define <2 x i64> @ashr_exact_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
 ; CHECK-LABEL: ashr_exact_v2_metadata:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT:    flat_load_dwordx4 v[6:9], v[4:5]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v4, v[0:1]
-; CHECK-NEXT:    v_ashrrev_i64 v[2:3], v6, v[2:3]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v6, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, v8, v3
+; CHECK-NEXT:    v_mov_b32_e32 v1, v5
+; CHECK-NEXT:    v_mov_b32_e32 v3, v4
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
   %ashr = ashr exact <2 x i64> %arg0, %shift.amt
@@ -137,12 +145,18 @@ define <3 x i64> @ashr_v3_metadata(<3 x i64> %arg0, ptr %arg1.ptr) {
 ; CHECK-LABEL: ashr_v3_metadata:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dword v12, v[6:7] offset:16
-; CHECK-NEXT:    flat_load_dwordx4 v[8:11], v[6:7]
+; CHECK-NEXT:    flat_load_dword v0, v[6:7] offset:16
+; CHECK-NEXT:    flat_load_dwordx4 v[9:12], v[6:7]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v7, 31, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v8, 31, v3
+; CHECK-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_ashrrev_i64 v[4:5], v12, v[4:5]
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v8, v[0:1]
-; CHECK-NEXT:    v_ashrrev_i64 v[2:3], v10, v[2:3]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v4, v0, v5
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v9, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, v11, v3
+; CHECK-NEXT:    v_mov_b32_e32 v1, v7
+; CHECK-NEXT:    v_mov_b32_e32 v3, v8
+; CHECK-NEXT:    v_mov_b32_e32 v5, v6
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
   %ashr = ashr <3 x i64> %arg0, %shift.amt
@@ -153,15 +167,23 @@ define <4 x i64> @ashr_v4_metadata(<4 x i64> %arg0, ptr %arg1.ptr) {
 ; CHECK-LABEL: ashr_v4_metadata:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dwordx4 v[10:13], v[8:9]
+; CHECK-NEXT:    flat_load_dwordx4 v[12:15], v[8:9]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dwordx4 v[13:16], v[8:9] offset:16
+; CHECK-NEXT:    flat_load_dwordx4 v[15:18], v[8:9] offset:16
 ; CHECK-NEXT:    ; kill: killed $vgpr8 killed $vgpr9
-; CHECK-NEXT:    v_ashrrev_i64 v[0:1], v10, v[0:1]
-; CHECK-NEXT:    v_ashrrev_i64 v[2:3], v12, v[2:3]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v11, 31, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v9, 31, v3
+; CHECK-NEXT:    v_ashrrev_i32_e32 v10, 31, v5
+; CHECK-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, v12, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, v14, v3
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_ashrrev_i64 v[4:5], v13, v[4:5]
-; CHECK-NEXT:    v_ashrrev_i64 v[6:7], v15, v[6:7]
+; CHECK-NEXT:    v_ashrrev_i32_e32 v4, v15, v5
+; CHECK-NEXT:    v_ashrrev_i32_e32 v6, v17, v7
+; CHECK-NEXT:    v_mov_b32_e32 v1, v11
+; CHECK-NEXT:    v_mov_b32_e32 v3, v9
+; CHECK-NEXT:    v_mov_b32_e32 v5, v10
+; CHECK-NEXT:    v_mov_b32_e32 v7, v8
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
   %ashr = ashr <4 x i64> %arg0, %shift.amt

>From 482f3939babe995e04710db879dece480a9f24e4 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 6 Jun 2025 18:04:42 -0500
Subject: [PATCH 4/8] Simplify code

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index f105523edd405..861dac3cc996a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4239,10 +4239,7 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
     }
     Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, ConcatType, HiAndLoOps);
   } else {
-    if (Known.getMinValue().getZExtValue() == (ElementType.getSizeInBits() - 1))
-      Vec = DAG.getBuildVector(ConcatType, SL, {HiShift, HiShift});
-    else
-      Vec = DAG.getBuildVector(ConcatType, SL, {NewShift, HiShift});
+    Vec = DAG.getBuildVector(ConcatType, SL, {NewShift, HiShift});
   }
   return DAG.getNode(ISD::BITCAST, SL, VT, Vec);
 }

>From 8eab35dd555fcc2de6d836392f94aae5baede580 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 16 Jun 2025 11:57:28 -0500
Subject: [PATCH 5/8] Freeze hi-half of sra source

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |   3 +-
 llvm/test/CodeGen/AMDGPU/fptoi.i128.ll        |  28 +++-
 llvm/test/CodeGen/AMDGPU/load-constant-i16.ll |  16 +-
 llvm/test/CodeGen/AMDGPU/load-global-i16.ll   |   4 +-
 llvm/test/CodeGen/AMDGPU/mul_int24.ll         |  74 +++++----
 llvm/test/CodeGen/AMDGPU/sra.ll               | 157 +++++++++---------
 6 files changed, 144 insertions(+), 138 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 861dac3cc996a..7ce73b4255ea1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4157,7 +4157,7 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
   SDValue LHS = N->getOperand(0);
   SelectionDAG &DAG = DCI.DAG;
   SDLoc SL(N);
-  
+
   if (VT.getScalarType() != MVT::i64)
     return SDValue();
 
@@ -4216,6 +4216,7 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
     SDValue SplitLHS = DAG.getNode(ISD::BITCAST, LHSSL, ConcatType, LHS);
     Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, LHSSL, TargetType, SplitLHS, One);
   }
+  Hi = DAG.getFreeze(Hi);
 
   SDValue HiShift = DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftFullAmt);
   SDValue NewShift = DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftAmt);
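
The freeze is needed because the high half now feeds two different 32-bit shifts; freezing pins a possibly undef/poison value to a single bit pattern, as the getFreeze call in the old scalar-only path already did. A hedged IR-level sketch of the split for an amount known to be in [32, 64):

  define i64 @sra_split_sketch(i64 %x, i32 %amt32) {
    ; Assumes 32 <= %amt32 < 64; %hi is used twice, so it is frozen first.
    %hi64 = lshr i64 %x, 32
    %hi = trunc i64 %hi64 to i32
    %hi.fr = freeze i32 %hi
    %amt.sub = sub i32 %amt32, 32
    %lo.res = ashr i32 %hi.fr, %amt.sub   ; low word of the result
    %hi.res = ashr i32 %hi.fr, 31         ; high word is the sign fill
    %lo.ext = zext i32 %lo.res to i64
    %hi.ext = zext i32 %hi.res to i64
    %hi.shl = shl i64 %hi.ext, 32
    %res = or i64 %hi.shl, %lo.ext
    ret i64 %res
  }
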
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index 3465c782bd700..189b897793381 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -1433,15 +1433,25 @@ define i128 @fptoui_f32_to_i128(float %x) {
 }
 
 define i128 @fptosi_f16_to_i128(half %x) {
-; GCN-LABEL: fptosi_f16_to_i128:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; GCN-NEXT:    v_cvt_i32_f32_e32 v0, v0
-; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NEXT:    v_mov_b32_e32 v2, v1
-; GCN-NEXT:    v_mov_b32_e32 v3, v1
-; GCN-NEXT:    s_setpc_b64 s[30:31]
+; SDAG-LABEL: fptosi_f16_to_i128:
+; SDAG:       ; %bb.0:
+; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; SDAG-NEXT:    v_cvt_i32_f32_e32 v0, v0
+; SDAG-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; SDAG-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; SDAG-NEXT:    v_mov_b32_e32 v3, v2
+; SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GISEL-LABEL: fptosi_f16_to_i128:
+; GISEL:       ; %bb.0:
+; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GISEL-NEXT:    v_cvt_i32_f32_e32 v0, v0
+; GISEL-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GISEL-NEXT:    v_mov_b32_e32 v2, v1
+; GISEL-NEXT:    v_mov_b32_e32 v3, v1
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   %cvt = fptosi half %x to i128
   ret i128 %cvt
 }
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
index ef4e475c85f26..8e312a0e195ff 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
@@ -1643,15 +1643,15 @@ define amdgpu_kernel void @constant_sextload_v4i16_to_v4i32(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    s_load_dwordx2 s[4:5], s[2:3], 0x0
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s6, s5, 16
-; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s7, s4, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s6, s4, 16
+; GCN-NOHSA-SI-NEXT:    s_ashr_i32 s7, s5, 16
 ; GCN-NOHSA-SI-NEXT:    s_sext_i32_i16 s5, s5
 ; GCN-NOHSA-SI-NEXT:    s_sext_i32_i16 s4, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s2, -1
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v1, s6
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v2, s5
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s6
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v3, s7
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
@@ -1666,14 +1666,14 @@ define amdgpu_kernel void @constant_sextload_v4i16_to_v4i32(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v4, s0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v5, s1
 ; GCN-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-HSA-NEXT:    s_ashr_i32 s0, s3, 16
-; GCN-HSA-NEXT:    s_ashr_i32 s1, s2, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s0, s2, 16
+; GCN-HSA-NEXT:    s_ashr_i32 s1, s3, 16
 ; GCN-HSA-NEXT:    s_sext_i32_i16 s3, s3
 ; GCN-HSA-NEXT:    s_sext_i32_i16 s2, s2
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v2, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, s1
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GCN-HSA-NEXT:    s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
index 43249dc60ae16..3753737d251e4 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -1738,8 +1738,8 @@ define amdgpu_kernel void @global_sextload_v4i16_to_v4i32(ptr addrspace(1) %out,
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s4, s0
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s5, s1
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v3, 16, v5
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v1, 16, v4
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v3, 16, v5
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v2, v5, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v0, v4, 0, 16
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
@@ -1758,8 +1758,8 @@ define amdgpu_kernel void @global_sextload_v4i16_to_v4i32(ptr addrspace(1) %out,
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v6, s0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v7, s1
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v3, 16, v5
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v1, 16, v4
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v3, 16, v5
 ; GCN-HSA-NEXT:    v_bfe_i32 v2, v5, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v0, v4, 0, 16
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[6:7], v[0:3]
diff --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
index 935a4aba385b2..4377e7569747a 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
@@ -463,39 +463,41 @@ define amdgpu_kernel void @test_smul24_i64_square(ptr addrspace(1) %out, i32 %a,
 define amdgpu_kernel void @test_smul24_i33(ptr addrspace(1) %out, i33 %a, i33 %b) #0 {
 ; SI-LABEL: test_smul24_i33:
 ; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
-; SI-NEXT:    s_load_dword s6, s[4:5], 0xd
-; SI-NEXT:    s_load_dword s4, s[4:5], 0xb
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0xd
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_bfe_i32 s5, s6, 0x180000
-; SI-NEXT:    s_bfe_i32 s4, s4, 0x180000
-; SI-NEXT:    v_mov_b32_e32 v0, s5
-; SI-NEXT:    s_mul_i32 s5, s4, s5
-; SI-NEXT:    v_mul_hi_i32_i24_e32 v1, s4, v0
-; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    s_bfe_i32 s0, s8, 0x180000
+; SI-NEXT:    s_bfe_i32 s1, s2, 0x180000
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    s_mul_i32 s0, s1, s0
+; SI-NEXT:    v_mul_hi_i32_i24_e32 v1, s1, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s0
 ; SI-NEXT:    v_lshl_b64 v[0:1], v[0:1], 31
 ; SI-NEXT:    v_ashr_i64 v[0:1], v[0:1], 31
-; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_smul24_i33:
 ; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x2c
-; VI-NEXT:    s_load_dword s3, s[4:5], 0x34
-; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_bfe_i32 s2, s2, 0x180000
-; VI-NEXT:    s_bfe_i32 s3, s3, 0x180000
+; VI-NEXT:    s_bfe_i32 s3, s4, 0x180000
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
 ; VI-NEXT:    v_mul_hi_i32_i24_e32 v1, s2, v0
 ; VI-NEXT:    v_mul_i32_i24_e32 v0, s2, v0
 ; VI-NEXT:    v_lshlrev_b64 v[0:1], 31, v[0:1]
-; VI-NEXT:    s_mov_b32 s3, 0xf000
+; VI-NEXT:    s_mov_b32 s4, s0
 ; VI-NEXT:    v_ashrrev_i64 v[0:1], 31, v[0:1]
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT:    s_mov_b32 s5, s1
+; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: test_smul24_i33:
@@ -574,30 +576,32 @@ entry:
 define amdgpu_kernel void @test_smulhi24_i33(ptr addrspace(1) %out, i33 %a, i33 %b) {
 ; SI-LABEL: test_smulhi24_i33:
 ; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_load_dword s6, s[4:5], 0xd
-; SI-NEXT:    s_load_dword s7, s[4:5], 0xb
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0xd
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s6
-; SI-NEXT:    v_mul_hi_i32_i24_e32 v0, s7, v0
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_mul_hi_i32_i24_e32 v0, s2, v0
 ; SI-NEXT:    v_and_b32_e32 v0, 1, v0
-; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_smulhi24_i33:
 ; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dword s6, s[4:5], 0x34
-; VI-NEXT:    s_load_dword s7, s[4:5], 0x2c
-; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x34
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s6
-; VI-NEXT:    v_mul_hi_i32_i24_e32 v0, s7, v0
+; VI-NEXT:    s_mov_b32 s4, s0
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mul_hi_i32_i24_e32 v0, s2, v0
+; VI-NEXT:    s_mov_b32 s5, s1
 ; VI-NEXT:    v_and_b32_e32 v0, 1, v0
-; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: test_smulhi24_i33:
diff --git a/llvm/test/CodeGen/AMDGPU/sra.ll b/llvm/test/CodeGen/AMDGPU/sra.ll
index 975d1a00ae9bd..0b49b9c815da5 100644
--- a/llvm/test/CodeGen/AMDGPU/sra.ll
+++ b/llvm/test/CodeGen/AMDGPU/sra.ll
@@ -791,16 +791,16 @@ define amdgpu_kernel void @v_ashr_32_i64(ptr addrspace(1) %out, ptr addrspace(1)
 define amdgpu_kernel void @s_ashr_33_i64(ptr addrspace(1) %out, [8 x i32], i64 %a, [8 x i32], i64 %b) {
 ; SI-LABEL: s_ashr_33_i64:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dword s6, s[4:5], 0x14
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x13
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x1d
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_ashr_i32 s7, s6, 31
-; SI-NEXT:    s_ashr_i32 s6, s6, 1
-; SI-NEXT:    s_add_u32 s4, s6, s4
-; SI-NEXT:    s_addc_u32 s5, s7, s5
+; SI-NEXT:    s_ashr_i32 s6, s7, 31
+; SI-NEXT:    s_ashr_i32 s7, s7, 1
+; SI-NEXT:    s_add_u32 s4, s7, s4
+; SI-NEXT:    s_addc_u32 s5, s6, s5
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
 ; SI-NEXT:    v_mov_b32_e32 v1, s5
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -808,16 +808,16 @@ define amdgpu_kernel void @s_ashr_33_i64(ptr addrspace(1) %out, [8 x i32], i64 %
 ;
 ; VI-LABEL: s_ashr_33_i64:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dword s6, s[4:5], 0x50
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x4c
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x74
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_ashr_i32 s7, s6, 31
-; VI-NEXT:    s_ashr_i32 s6, s6, 1
-; VI-NEXT:    s_add_u32 s4, s6, s4
-; VI-NEXT:    s_addc_u32 s5, s7, s5
+; VI-NEXT:    s_ashr_i32 s6, s7, 31
+; VI-NEXT:    s_ashr_i32 s7, s7, 1
+; VI-NEXT:    s_add_u32 s4, s7, s4
+; VI-NEXT:    s_addc_u32 s5, s6, s5
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -854,14 +854,13 @@ define amdgpu_kernel void @v_ashr_33_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
-; SI-NEXT:    s_mov_b64 s[10:11], s[6:7]
-; SI-NEXT:    buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 offset:4
-; SI-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; SI-NEXT:    v_ashrrev_i32_e32 v2, 1, v2
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; SI-NEXT:    v_ashrrev_i32_e32 v3, 1, v3
+; SI-NEXT:    buffer_store_dwordx2 v[3:4], v[0:1], s[0:3], 0 addr64
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_ashr_33_i64:
@@ -869,19 +868,17 @@ define amdgpu_kernel void @v_ashr_33_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
-; VI-NEXT:    v_add_u32_e32 v0, vcc, 4, v1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1]
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v4
-; VI-NEXT:    v_ashrrev_i32_e32 v2, 1, v4
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
+; VI-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
+; VI-NEXT:    v_ashrrev_i32_e32 v4, 1, v1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
 ; VI-NEXT:    s_endpgm
 ;
 ; EG-LABEL: v_ashr_33_i64:
@@ -917,16 +914,16 @@ define amdgpu_kernel void @v_ashr_33_i64(ptr addrspace(1) %out, ptr addrspace(1)
 define amdgpu_kernel void @s_ashr_62_i64(ptr addrspace(1) %out, [8 x i32], i64 %a, [8 x i32], i64 %b) {
 ; SI-LABEL: s_ashr_62_i64:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dword s6, s[4:5], 0x14
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x13
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x1d
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_ashr_i32 s7, s6, 31
-; SI-NEXT:    s_ashr_i32 s6, s6, 30
-; SI-NEXT:    s_add_u32 s4, s6, s4
-; SI-NEXT:    s_addc_u32 s5, s7, s5
+; SI-NEXT:    s_ashr_i32 s6, s7, 31
+; SI-NEXT:    s_ashr_i32 s7, s7, 30
+; SI-NEXT:    s_add_u32 s4, s7, s4
+; SI-NEXT:    s_addc_u32 s5, s6, s5
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
 ; SI-NEXT:    v_mov_b32_e32 v1, s5
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -934,16 +931,16 @@ define amdgpu_kernel void @s_ashr_62_i64(ptr addrspace(1) %out, [8 x i32], i64 %
 ;
 ; VI-LABEL: s_ashr_62_i64:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dword s6, s[4:5], 0x50
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x4c
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x74
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_ashr_i32 s7, s6, 31
-; VI-NEXT:    s_ashr_i32 s6, s6, 30
-; VI-NEXT:    s_add_u32 s4, s6, s4
-; VI-NEXT:    s_addc_u32 s5, s7, s5
+; VI-NEXT:    s_ashr_i32 s6, s7, 31
+; VI-NEXT:    s_ashr_i32 s7, s7, 30
+; VI-NEXT:    s_add_u32 s4, s7, s4
+; VI-NEXT:    s_addc_u32 s5, s6, s5
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -980,14 +977,13 @@ define amdgpu_kernel void @v_ashr_62_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
-; SI-NEXT:    s_mov_b64 s[10:11], s[6:7]
-; SI-NEXT:    buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 offset:4
-; SI-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; SI-NEXT:    v_ashrrev_i32_e32 v2, 30, v2
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; SI-NEXT:    v_ashrrev_i32_e32 v3, 30, v3
+; SI-NEXT:    buffer_store_dwordx2 v[3:4], v[0:1], s[0:3], 0 addr64
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_ashr_62_i64:
@@ -995,19 +991,17 @@ define amdgpu_kernel void @v_ashr_62_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
-; VI-NEXT:    v_add_u32_e32 v0, vcc, 4, v1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1]
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v4
-; VI-NEXT:    v_ashrrev_i32_e32 v2, 30, v4
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
+; VI-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
+; VI-NEXT:    v_ashrrev_i32_e32 v4, 30, v1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
 ; VI-NEXT:    s_endpgm
 ;
 ; EG-LABEL: v_ashr_62_i64:
@@ -1044,15 +1038,15 @@ define amdgpu_kernel void @v_ashr_62_i64(ptr addrspace(1) %out, ptr addrspace(1)
 define amdgpu_kernel void @s_ashr_63_i64(ptr addrspace(1) %out, [8 x i32], i64 %a, [8 x i32], i64 %b) {
 ; SI-LABEL: s_ashr_63_i64:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dword s8, s[4:5], 0x14
-; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x1d
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x13
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x1d
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_ashr_i32 s5, s8, 31
-; SI-NEXT:    s_add_u32 s4, s5, s6
-; SI-NEXT:    s_addc_u32 s5, s5, s7
+; SI-NEXT:    s_ashr_i32 s5, s7, 31
+; SI-NEXT:    s_add_u32 s4, s5, s8
+; SI-NEXT:    s_addc_u32 s5, s5, s9
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
 ; SI-NEXT:    v_mov_b32_e32 v1, s5
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -1060,15 +1054,15 @@ define amdgpu_kernel void @s_ashr_63_i64(ptr addrspace(1) %out, [8 x i32], i64 %
 ;
 ; VI-LABEL: s_ashr_63_i64:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dword s8, s[4:5], 0x50
-; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x74
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x4c
+; VI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x74
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; VI-NEXT:    s_mov_b32 s3, 0xf000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_ashr_i32 s5, s8, 31
-; VI-NEXT:    s_add_u32 s4, s5, s6
-; VI-NEXT:    s_addc_u32 s5, s5, s7
+; VI-NEXT:    s_ashr_i32 s5, s7, 31
+; VI-NEXT:    s_add_u32 s4, s5, s8
+; VI-NEXT:    s_addc_u32 s5, s5, s9
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -1104,14 +1098,13 @@ define amdgpu_kernel void @v_ashr_63_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
-; SI-NEXT:    s_mov_b64 s[10:11], s[6:7]
-; SI-NEXT:    buffer_load_dword v2, v[0:1], s[8:11], 0 addr64 offset:4
-; SI-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_ashrrev_i32_e32 v2, 31, v2
+; SI-NEXT:    v_ashrrev_i32_e32 v2, 31, v3
 ; SI-NEXT:    v_mov_b32_e32 v3, v2
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_ashr_63_i64:
@@ -1119,19 +1112,17 @@ define amdgpu_kernel void @v_ashr_63_i64(ptr addrspace(1) %out, ptr addrspace(1)
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
-; VI-NEXT:    v_add_u32_e32 v0, vcc, 4, v1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1]
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_ashrrev_i32_e32 v2, 31, v3
-; VI-NEXT:    v_mov_b32_e32 v3, v2
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v0, vcc
+; VI-NEXT:    v_ashrrev_i32_e32 v0, 31, v1
+; VI-NEXT:    v_mov_b32_e32 v1, v0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT:    s_endpgm
 ;
 ; EG-LABEL: v_ashr_63_i64:

>From e7664621a3087b676c252df0ad4dafa9e9defc0f Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 16 Jun 2025 11:58:31 -0500
Subject: [PATCH 6/8] Fix comment formatting

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 7ce73b4255ea1..6baa9ed1a8e98 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4162,7 +4162,7 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
     return SDValue();
 
   // for C >= 32
-  // i64 (sra x, C) -> (build_pair (sra hi_32(x), C -32), sra hi_32(x), 31))
+  // i64 (sra x, C) -> (build_pair (sra hi_32(x), C - 32), sra hi_32(x), 31))
 
   // On some subtargets, 64-bit shift is a quarter rate instruction. In the
   // common case, splitting this into a move and a 32-bit shift is faster and
@@ -4279,7 +4279,7 @@ SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
     return SDValue();
 
   // for C >= 32
-  // i64 (srl x, C) -> (build_pair (srl hi_32(x), C -32), 0)
+  // i64 (srl x, C) -> (build_pair (srl hi_32(x), C - 32), 0)
 
   // On some subtargets, 64-bit shift is a quarter rate instruction. In the
   // common case, splitting this into a move and a 32-bit shift is faster and

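As an aside, the rewrite that the "C >= 32" comment in the hunk above describes can be sketched directly in LLVM IR. This is illustrative only and not part of the patch; the function names are made up, and the second function spells out the 32-bit-only computation the combine expects to end up with for a constant shift of 33:

; Original 64-bit form.
define i64 @ashr_33_example(i64 %x) {
  %r = ashr i64 %x, 33
  ret i64 %r
}

; Equivalent computation using only 32-bit shifts of the high half:
;   low half  = ashr hi_32(x), C - 32
;   high half = ashr hi_32(x), 31   (sign fill)
define i64 @ashr_33_manual(i64 %x) {
  %x.hi64 = lshr i64 %x, 32
  %hi     = trunc i64 %x.hi64 to i32
  %lo32   = ashr i32 %hi, 1            ; C - 32 = 1
  %hi32   = ashr i32 %hi, 31           ; sign of the original value
  %lo.ext = zext i32 %lo32 to i64
  %hi.ext = zext i32 %hi32 to i64
  %hi.shl = shl i64 %hi.ext, 32
  %r      = or i64 %hi.shl, %lo.ext    ; i.e. build_pair(lo32, hi32)
  ret i64 %r
}
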
>From e9e51e5b944a91e32db1094bcf6d11ab1f301eb9 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 16 Jun 2025 14:29:25 -0500
Subject: [PATCH 7/8] Handle max shift-amt more cleanly

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |  9 ++++-----
 llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll     | 15 +++++++++++++++
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 6baa9ed1a8e98..1ef314fa439aa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4185,6 +4185,9 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
 
     ShiftAmt = DAG.getConstant(RHSVal - TargetScalarType.getSizeInBits(), SL,
                                TargetType);
+  } else if (Known.getMinValue().getZExtValue() ==
+             (ElementType.getSizeInBits() - 1)) {
+    ShiftAmt = ShiftFullAmt;
   } else {
     SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
     const SDValue ShiftMask =
@@ -4232,11 +4235,7 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
     DAG.ExtractVectorElements(NewShift, LoOps, 0, NElts);
     for (unsigned I = 0; I != NElts; ++I) {
       HiAndLoOps[2 * I + 1] = HiOps[I];
-      if (Known.getMinValue().getZExtValue() ==
-          (ElementType.getSizeInBits() - 1))
-        HiAndLoOps[2 * I] = HiOps[I];
-      else
-        HiAndLoOps[2 * I] = LoOps[I];
+      HiAndLoOps[2 * I] = LoOps[I];
     }
     Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, ConcatType, HiAndLoOps);
   } else {
diff --git a/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll b/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll
index ba7c054d24308..78942bfc68d63 100644
--- a/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/ashr64_reduce.ll
@@ -122,6 +122,20 @@ define <2 x i64> @ashr_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
   ret <2 x i64> %ashr
 }
 
+define <2 x i64> @ashr_v2_metadata_63(<2 x i64> %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: ashr_v2_metadata_63:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_ashrrev_i32_e32 v0, 31, v1
+; CHECK-NEXT:    v_ashrrev_i32_e32 v2, 31, v3
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_mov_b32_e32 v3, v2
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !4, !noundef !{}
+  %ashr = ashr <2 x i64> %arg0, %shift.amt
+  ret <2 x i64> %ashr
+}
+
 ; Exact attribute does not inhibit reduction
 define <2 x i64> @ashr_exact_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
 ; CHECK-LABEL: ashr_exact_v2_metadata:
@@ -194,6 +208,7 @@ define <4 x i64> @ashr_v4_metadata(<4 x i64> %arg0, ptr %arg1.ptr) {
 !1 = !{i64 32, i64 38, i64 42, i64 48}
 !2 = !{i64 31, i64 38, i64 42, i64 48}
 !3 = !{i64 32, i64 38, i64 2147483680, i64 2147483681}
+!4 = !{i64 63, i64 64}
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Test range with an "or X, 16"

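The ashr_v2_metadata_63 test added above exercises the new branch: when computeKnownBits proves the shift amount is exactly 63 (range [63, 64)), ShiftAmt becomes ShiftFullAmt and both halves of each result element reduce to the sign bit of the high half, which is what the v_ashrrev_i32 ... 31 plus v_mov_b32 pairs in the CHECK lines show. A scalar analogue, given only as an illustrative sketch with a made-up function name, would be:

define i64 @ashr_range_63(i64 %x, ptr %amt.ptr) {
  ; The !range metadata restricts the loaded shift amount to the single
  ; value 63, so the ashr is a splat of the sign bit of %x.
  %amt = load i64, ptr %amt.ptr, !range !0, !noundef !{}
  %r = ashr i64 %x, %amt
  ret i64 %r
}
!0 = !{i64 63, i64 64}
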
>From af255722f8524e0e8ca3a4b004da17a07e20f310 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 20 Jun 2025 09:45:58 -0500
Subject: [PATCH 8/8] Address comments

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 1ef314fa439aa..c3b6f6d95f083 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4161,7 +4161,7 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
   if (VT.getScalarType() != MVT::i64)
     return SDValue();
 
-  // for C >= 32
+  // For C >= 32
   // i64 (sra x, C) -> (build_pair (sra hi_32(x), C - 32), sra hi_32(x), 31))
 
   // On some subtargets, 64-bit shift is a quarter rate instruction. In the
@@ -4182,7 +4182,6 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
   SDValue ShiftAmt;
   if (CRHS) {
     unsigned RHSVal = CRHS->getZExtValue();
-
     ShiftAmt = DAG.getConstant(RHSVal - TargetScalarType.getSizeInBits(), SL,
                                TargetType);
   } else if (Known.getMinValue().getZExtValue() ==


