[llvm] [AMDGPU][SDAG] Try folding "lshr i64 + mad" to "mad_[iu]64_[iu]32" (PR #119218)

Vikram Hegde via llvm-commits llvm-commits at lists.llvm.org
Sun Jan 5 22:09:53 PST 2025


https://github.com/vikramRH updated https://github.com/llvm/llvm-project/pull/119218

From ebd00b33096e21185174d93545ff2e9806be7060 Mon Sep 17 00:00:00 2001
From: vikhegde <vikram.hegde at amd.com>
Date: Mon, 9 Dec 2024 19:25:14 +0530
Subject: [PATCH 1/5] [AMDGPU][SDAG] Try folding "lshr i64 + mad" to
 "mad_[iu]64_[iu]32"

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 49 +++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 58b061f5c1af0d..c11fb3bb752463 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -13857,6 +13857,52 @@ static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, EVT VT,
   return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
 }
 
+// Fold
+//     y = lshr i64 x, 32
+//     res = add (mul i64 y, Constant), x   where "Constant" is a 32-bit
+//     negative value
+// To
+//     res = mad_u64_u32 y.lo, Constant.lo, x.lo
+static SDValue tryFoldMADwithSRL(SelectionDAG &DAG, const SDLoc &SL,
+                                 SDValue MulLHS, SDValue MulRHS,
+                                 SDValue AddRHS) {
+
+  if (MulLHS.getValueType() != MVT::i64)
+    return SDValue();
+
+  ConstantSDNode *ConstOp;
+  SDValue ShiftOp;
+  if (MulLHS.getOpcode() == ISD::SRL && MulRHS.getOpcode() == ISD::Constant) {
+    ConstOp = cast<ConstantSDNode>(MulRHS.getNode());
+    ShiftOp = MulLHS;
+  } else if (MulRHS.getOpcode() == ISD::SRL &&
+             MulLHS.getOpcode() == ISD::Constant) {
+    ConstOp = cast<ConstantSDNode>(MulLHS.getNode());
+    ShiftOp = MulRHS;
+  } else
+    return SDValue();
+
+  if (ShiftOp.getOperand(1).getOpcode() != ISD::Constant ||
+      AddRHS != ShiftOp.getOperand(0))
+    return SDValue();
+
+  if (cast<ConstantSDNode>(ShiftOp->getOperand(1))->getAsZExtVal() != 32)
+    return SDValue();
+
+  APInt ConstVal = ConstOp->getAPIntValue();
+  if (!ConstVal.isNegative() || !ConstVal.isSignedIntN(33))
+    return SDValue();
+
+  SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
+  SDValue ConstMul = DAG.getConstant(
+      ConstVal.getZExtValue() & 0x00000000FFFFFFFF, SL, MVT::i32);
+  AddRHS = DAG.getNode(ISD::AND, SL, MVT::i64, AddRHS,
+                       DAG.getConstant(0x00000000FFFFFFFF, SL, MVT::i64));
+  return getMad64_32(DAG, SL, MVT::i64,
+                     DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulLHS), ConstMul,
+                     AddRHS, false);
+}
+
 // Fold (add (mul x, y), z) --> (mad_[iu]64_[iu]32 x, y, z) plus high
 // multiplies, if any.
 //
@@ -13915,6 +13961,9 @@ SDValue SITargetLowering::tryFoldToMad64_32(SDNode *N,
   SDValue MulRHS = LHS.getOperand(1);
   SDValue AddRHS = RHS;
 
+  if (SDValue FoldedMAD = tryFoldMADwithSRL(DAG, SL, MulLHS, MulRHS, AddRHS))
+    return FoldedMAD;
+
   // Always check whether operands are small unsigned values, since that
   // knowledge is useful in more cases. Check for small signed values only if
   // doing so can unlock a shorter code sequence.

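For orientation, a minimal IR example of the shape this fold targets (an illustrative sketch; the in-tree coverage is llvm/test/CodeGen/AMDGPU/mad_64_32.ll, excerpted in patch 4 below):

  define i64 @lshr_mad_example(i64 %x) {
    %lsh = lshr i64 %x, 32
    ; -999 is 0xfffffffffffffc19: a negative value whose high 32 bits are all ones
    %mul = mul i64 %lsh, -999
    %res = add i64 %mul, %x
    ret i64 %res
  }

  ; after the fold: res = mad_u64_u32 %lsh.lo, 0xfffffc19, %x.lo
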
From 114095ca9193eaa813cdc3191622ef8abeb44c20 Mon Sep 17 00:00:00 2001
From: vikhegde <vikram.hegde at amd.com>
Date: Tue, 10 Dec 2024 08:23:57 +0000
Subject: [PATCH 2/5] review comments

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 45 ++++++++++-------------
 1 file changed, 19 insertions(+), 26 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index c11fb3bb752463..5e9b5aefd04eca 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -13859,43 +13859,30 @@ static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, EVT VT,
 
 // Fold
 //     y = lshr i64 x, 32
-//     res = add (mul i64 y, Constant), x   where "Constant" is a 32-bit
-//     negative value
+//     res = add (mul i64 y, Const), x   where "Const" is a 64-bit constant
+//     with Const.hi == -1
 // To
-//     res = mad_u64_u32 y.lo, Constant.lo, x.lo
+//     res = mad_u64_u32 y.lo, Const.lo, x.lo
 static SDValue tryFoldMADwithSRL(SelectionDAG &DAG, const SDLoc &SL,
                                  SDValue MulLHS, SDValue MulRHS,
                                  SDValue AddRHS) {
 
-  if (MulLHS.getValueType() != MVT::i64)
+  if (MulLHS.getValueType() != MVT::i64 || MulLHS.getOpcode() != ISD::SRL)
     return SDValue();
 
-  ConstantSDNode *ConstOp;
-  SDValue ShiftOp;
-  if (MulLHS.getOpcode() == ISD::SRL && MulRHS.getOpcode() == ISD::Constant) {
-    ConstOp = cast<ConstantSDNode>(MulRHS.getNode());
-    ShiftOp = MulLHS;
-  } else if (MulRHS.getOpcode() == ISD::SRL &&
-             MulLHS.getOpcode() == ISD::Constant) {
-    ConstOp = cast<ConstantSDNode>(MulLHS.getNode());
-    ShiftOp = MulRHS;
-  } else
-    return SDValue();
-
-  if (ShiftOp.getOperand(1).getOpcode() != ISD::Constant ||
-      AddRHS != ShiftOp.getOperand(0))
+  if (MulLHS.getOperand(1).getOpcode() != ISD::Constant ||
+      MulLHS.getOperand(0) != AddRHS)
     return SDValue();
 
-  if (cast<ConstantSDNode>(ShiftOp->getOperand(1))->getAsZExtVal() != 32)
+  if (cast<ConstantSDNode>(MulLHS->getOperand(1))->getAsZExtVal() != 32)
     return SDValue();
 
-  APInt ConstVal = ConstOp->getAPIntValue();
-  if (!ConstVal.isNegative() || !ConstVal.isSignedIntN(33))
+  APInt Const = cast<ConstantSDNode>(MulRHS.getNode())->getAPIntValue();
+  if (!Const.isNegative() || !Const.isSignedIntN(33))
     return SDValue();
 
-  SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
-  SDValue ConstMul = DAG.getConstant(
-      ConstVal.getZExtValue() & 0x00000000FFFFFFFF, SL, MVT::i32);
+  SDValue ConstMul =
+      DAG.getConstant(Const.getZExtValue() & 0x00000000FFFFFFFF, SL, MVT::i32);
   AddRHS = DAG.getNode(ISD::AND, SL, MVT::i64, AddRHS,
                        DAG.getConstant(0x00000000FFFFFFFF, SL, MVT::i64));
   return getMad64_32(DAG, SL, MVT::i64,
@@ -13961,8 +13948,14 @@ SDValue SITargetLowering::tryFoldToMad64_32(SDNode *N,
   SDValue MulRHS = LHS.getOperand(1);
   SDValue AddRHS = RHS;
 
-  if (SDValue FoldedMAD = tryFoldMADwithSRL(DAG, SL, MulLHS, MulRHS, AddRHS))
-    return FoldedMAD;
+  if (MulLHS.getOpcode() == ISD::Constant ||
+      MulRHS.getOpcode() == ISD::Constant) {
+    if (MulRHS.getOpcode() == ISD::SRL)
+      std::swap(MulLHS, MulRHS);
+
+    if (SDValue FoldedMAD = tryFoldMADwithSRL(DAG, SL, MulLHS, MulRHS, AddRHS))
+      return FoldedMAD;
+  }
 
   // Always check whether operands are small unsigned values, since that
   // knowledge is useful in more cases. Check for small signed values only if

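A quick sanity check on why the precondition (Const.hi == -1, equivalently a negative value fitting in 33 signed bits) makes the fold sound, sketched here for reference rather than taken from the patch. Write x = 2^32*y + x_lo with y = lshr(x, 32), and note that Hi_32(C) == 0xffffffff means C = C_lo - 2^32 (mod 2^64). Then, modulo 2^64:

  y*C + x = y*C_lo - 2^32*y + x     (C = C_lo - 2^32)
          = y*C_lo + x_lo           (x = 2^32*y + x_lo)

and since y already fits in 32 bits, this is exactly mad_u64_u32(y, C_lo, zext(x_lo)).
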
From bfe3fae0706f67219fed5b2e6d57d03e889e252e Mon Sep 17 00:00:00 2001
From: vikhegde <vikram.hegde at amd.com>
Date: Tue, 17 Dec 2024 12:24:13 +0530
Subject: [PATCH 3/5] review comments

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 5e9b5aefd04eca..d49852652ff73f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -13870,24 +13870,22 @@ static SDValue tryFoldMADwithSRL(SelectionDAG &DAG, const SDLoc &SL,
   if (MulLHS.getValueType() != MVT::i64 || MulLHS.getOpcode() != ISD::SRL)
     return SDValue();
 
-  if (MulLHS.getOperand(1).getOpcode() != ISD::Constant ||
-      MulLHS.getOperand(0) != AddRHS)
+  ConstantSDNode *ShiftVal = dyn_cast<ConstantSDNode>(MulLHS.getOperand(1));
+  if (!ShiftVal || MulLHS.getOperand(0) != AddRHS)
     return SDValue();
 
-  if (cast<ConstantSDNode>(MulLHS->getOperand(1))->getAsZExtVal() != 32)
+  if (ShiftVal->getAsZExtVal() != 32)
     return SDValue();
 
-  APInt Const = cast<ConstantSDNode>(MulRHS.getNode())->getAPIntValue();
+  APInt Const = dyn_cast<ConstantSDNode>(MulRHS.getNode())->getAPIntValue();
   if (!Const.isNegative() || !Const.isSignedIntN(33))
     return SDValue();
 
   SDValue ConstMul =
       DAG.getConstant(Const.getZExtValue() & 0x00000000FFFFFFFF, SL, MVT::i32);
-  AddRHS = DAG.getNode(ISD::AND, SL, MVT::i64, AddRHS,
-                       DAG.getConstant(0x00000000FFFFFFFF, SL, MVT::i64));
   return getMad64_32(DAG, SL, MVT::i64,
                      DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulLHS), ConstMul,
-                     AddRHS, false);
+                     DAG.getZeroExtendInReg(AddRHS, SL, MVT::i32), false);
 }
 
 // Fold (add (mul x, y), z) --> (mad_[iu]64_[iu]32 x, y, z) plus high
@@ -13948,8 +13946,7 @@ SDValue SITargetLowering::tryFoldToMad64_32(SDNode *N,
   SDValue MulRHS = LHS.getOperand(1);
   SDValue AddRHS = RHS;
 
-  if (MulLHS.getOpcode() == ISD::Constant ||
-      MulRHS.getOpcode() == ISD::Constant) {
+  if (isa<ConstantSDNode>(MulLHS) || isa<ConstantSDNode>(MulRHS)) {
     if (MulRHS.getOpcode() == ISD::SRL)
       std::swap(MulLHS, MulRHS);
 

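The switch to getZeroExtendInReg is a readability cleanup rather than a functional change: SelectionDAG::getZeroExtendInReg produces the same AND-with-mask that the previous revision built by hand. A sketch of the two equivalent forms, modulo constant folding:

  // previous revision, explicit mask:
  AddRHS = DAG.getNode(ISD::AND, SL, MVT::i64, AddRHS,
                       DAG.getConstant(0x00000000FFFFFFFF, SL, MVT::i64));
  // this revision, equivalent helper:
  AddRHS = DAG.getZeroExtendInReg(AddRHS, SL, MVT::i32);
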
From a885dfcd91495b7edc688e422aebc212c04d0482 Mon Sep 17 00:00:00 2001
From: vikhegde <vikram.hegde at amd.com>
Date: Wed, 1 Jan 2025 11:10:31 +0530
Subject: [PATCH 4/5] update tests

---
 llvm/test/CodeGen/AMDGPU/mad_64_32.ll | 271 +++++++++++++++-----------
 1 file changed, 156 insertions(+), 115 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
index 3be17f9538d0fa..7d18739fd0c327 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -1337,10 +1337,10 @@ define i64 @lshr_mad_i64_1(i64 %arg0, i64 %arg1) #0 {
 ; CI-LABEL: lshr_mad_i64_1:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v2, v1
+; CI-NEXT:    v_mov_b32_e32 v1, 0
 ; CI-NEXT:    s_movk_i32 s4, 0xfc19
-; CI-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
-; CI-NEXT:    v_sub_i32_e32 v1, vcc, v3, v1
-; CI-NEXT:    v_mov_b32_e32 v0, v2
+; CI-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1]
 ; CI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; SI-LABEL: lshr_mad_i64_1:
@@ -1357,20 +1357,28 @@ define i64 @lshr_mad_i64_1(i64 %arg0, i64 %arg1) #0 {
 ; GFX9-LABEL: lshr_mad_i64_1:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, v1
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX9-NEXT:    s_movk_i32 s4, 0xfc19
-; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
-; GFX9-NEXT:    v_sub_u32_e32 v1, v3, v1
-; GFX9-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1]
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX11-LABEL: lshr_mad_i64_1:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_mad_u64_u32 v[2:3], null, 0xfffffc19, v1, v[0:1]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_sub_nc_u32_e32 v1, v3, v1
-; GFX11-NEXT:    v_mov_b32_e32 v0, v2
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX1100-LABEL: lshr_mad_i64_1:
+; GFX1100:       ; %bb.0:
+; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-NEXT:    v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v1, 0
+; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT:    v_mad_u64_u32 v[2:3], null, 0xfffffc19, v4, v[0:1]
+; GFX1100-NEXT:    v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3
+; GFX1100-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1150-LABEL: lshr_mad_i64_1:
+; GFX1150:       ; %bb.0:
+; GFX1150-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1150-NEXT:    v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0
+; GFX1150-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1150-NEXT:    v_mad_u64_u32 v[0:1], null, 0xfffffc19, v2, v[0:1]
+; GFX1150-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX12-LABEL: lshr_mad_i64_1:
 ; GFX12:       ; %bb.0:
@@ -1379,10 +1387,9 @@ define i64 @lshr_mad_i64_1(i64 %arg0, i64 %arg1) #0 {
 ; GFX12-NEXT:    s_wait_samplecnt 0x0
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
-; GFX12-NEXT:    v_mad_co_u64_u32 v[2:3], null, 0xfffffc19, v1, v[0:1]
-; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT:    v_sub_nc_u32_e32 v1, v3, v1
-; GFX12-NEXT:    v_mov_b32_e32 v0, v2
+; GFX12-NEXT:    v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    v_mad_co_u64_u32 v[0:1], null, 0xfffffc19, v2, v[0:1]
 ; GFX12-NEXT:    s_setpc_b64 s[30:31]
   %lsh = lshr i64 %arg0, 32
   %mul = mul i64 %lsh, s0xfffffffffffffc19
@@ -1395,10 +1402,10 @@ define i64 @lshr_mad_i64_2(i64 %arg0) #0 {
 ; CI-LABEL: lshr_mad_i64_2:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v2, v1
+; CI-NEXT:    v_mov_b32_e32 v1, 0
 ; CI-NEXT:    s_movk_i32 s4, 0xd1
-; CI-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
-; CI-NEXT:    v_sub_i32_e32 v1, vcc, v3, v1
-; CI-NEXT:    v_mov_b32_e32 v0, v2
+; CI-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1]
 ; CI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; SI-LABEL: lshr_mad_i64_2:
@@ -1415,20 +1422,28 @@ define i64 @lshr_mad_i64_2(i64 %arg0) #0 {
 ; GFX9-LABEL: lshr_mad_i64_2:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, v1
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX9-NEXT:    s_movk_i32 s4, 0xd1
-; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
-; GFX9-NEXT:    v_sub_u32_e32 v1, v3, v1
-; GFX9-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1]
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX11-LABEL: lshr_mad_i64_2:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_mad_u64_u32 v[2:3], null, 0xd1, v1, v[0:1]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_sub_nc_u32_e32 v1, v3, v1
-; GFX11-NEXT:    v_mov_b32_e32 v0, v2
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX1100-LABEL: lshr_mad_i64_2:
+; GFX1100:       ; %bb.0:
+; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-NEXT:    v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v1, 0
+; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT:    v_mad_u64_u32 v[2:3], null, 0xd1, v4, v[0:1]
+; GFX1100-NEXT:    v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3
+; GFX1100-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1150-LABEL: lshr_mad_i64_2:
+; GFX1150:       ; %bb.0:
+; GFX1150-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1150-NEXT:    v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0
+; GFX1150-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1150-NEXT:    v_mad_u64_u32 v[0:1], null, 0xd1, v2, v[0:1]
+; GFX1150-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX12-LABEL: lshr_mad_i64_2:
 ; GFX12:       ; %bb.0:
@@ -1437,10 +1452,9 @@ define i64 @lshr_mad_i64_2(i64 %arg0) #0 {
 ; GFX12-NEXT:    s_wait_samplecnt 0x0
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
-; GFX12-NEXT:    v_mad_co_u64_u32 v[2:3], null, 0xd1, v1, v[0:1]
-; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT:    v_sub_nc_u32_e32 v1, v3, v1
-; GFX12-NEXT:    v_mov_b32_e32 v0, v2
+; GFX12-NEXT:    v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    v_mad_co_u64_u32 v[0:1], null, 0xd1, v2, v[0:1]
 ; GFX12-NEXT:    s_setpc_b64 s[30:31]
   %lsh = lshr i64 %arg0, 32
   %mul = mul i64 %lsh, s0xffffffff000000d1
@@ -1453,10 +1467,10 @@ define i64 @lshr_mad_i64_3(i64 %arg0) #0 {
 ; CI-LABEL: lshr_mad_i64_3:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v2, v1
+; CI-NEXT:    v_mov_b32_e32 v1, 0
 ; CI-NEXT:    s_movk_i32 s4, 0xfc88
-; CI-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
-; CI-NEXT:    v_sub_i32_e32 v1, vcc, v3, v1
-; CI-NEXT:    v_mov_b32_e32 v0, v2
+; CI-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1]
 ; CI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; SI-LABEL: lshr_mad_i64_3:
@@ -1473,20 +1487,28 @@ define i64 @lshr_mad_i64_3(i64 %arg0) #0 {
 ; GFX9-LABEL: lshr_mad_i64_3:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, v1
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX9-NEXT:    s_movk_i32 s4, 0xfc88
-; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
-; GFX9-NEXT:    v_sub_u32_e32 v1, v3, v1
-; GFX9-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1]
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX11-LABEL: lshr_mad_i64_3:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_mad_u64_u32 v[2:3], null, 0xfffffc88, v1, v[0:1]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_sub_nc_u32_e32 v1, v3, v1
-; GFX11-NEXT:    v_mov_b32_e32 v0, v2
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX1100-LABEL: lshr_mad_i64_3:
+; GFX1100:       ; %bb.0:
+; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-NEXT:    v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v1, 0
+; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT:    v_mad_u64_u32 v[2:3], null, 0xfffffc88, v4, v[0:1]
+; GFX1100-NEXT:    v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3
+; GFX1100-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1150-LABEL: lshr_mad_i64_3:
+; GFX1150:       ; %bb.0:
+; GFX1150-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1150-NEXT:    v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0
+; GFX1150-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1150-NEXT:    v_mad_u64_u32 v[0:1], null, 0xfffffc88, v2, v[0:1]
+; GFX1150-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX12-LABEL: lshr_mad_i64_3:
 ; GFX12:       ; %bb.0:
@@ -1495,10 +1517,9 @@ define i64 @lshr_mad_i64_3(i64 %arg0) #0 {
 ; GFX12-NEXT:    s_wait_samplecnt 0x0
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
-; GFX12-NEXT:    v_mad_co_u64_u32 v[2:3], null, 0xfffffc88, v1, v[0:1]
-; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT:    v_sub_nc_u32_e32 v1, v3, v1
-; GFX12-NEXT:    v_mov_b32_e32 v0, v2
+; GFX12-NEXT:    v_dual_mov_b32 v2, v1 :: v_dual_mov_b32 v1, 0
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    v_mad_co_u64_u32 v[0:1], null, 0xfffffc88, v2, v[0:1]
 ; GFX12-NEXT:    s_setpc_b64 s[30:31]
   %lsh = lshr i64 %arg0, 32
   %mul = mul i64 s0xfffffffffffffc88, %lsh
@@ -1511,12 +1532,12 @@ define i64 @lshr_mad_i64_4(i32 %arg0, i64 %arg1) #0 {
 ; CI-LABEL: lshr_mad_i64_4:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CI-NEXT:    v_mul_lo_u32 v3, v2, v0
-; CI-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v1, v0, 0
+; CI-NEXT:    v_mul_lo_u32 v2, v2, v0
+; CI-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v1, v0, 0
 ; CI-NEXT:    s_movk_i32 s4, 0xfc88
-; CI-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; CI-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[1:2]
-; CI-NEXT:    v_sub_i32_e32 v1, vcc, v1, v2
+; CI-NEXT:    v_add_i32_e32 v2, vcc, v1, v2
+; CI-NEXT:    v_mov_b32_e32 v1, 0
+; CI-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[0:1]
 ; CI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; SI-LABEL: lshr_mad_i64_4:
@@ -1539,26 +1560,33 @@ define i64 @lshr_mad_i64_4(i32 %arg0, i64 %arg1) #0 {
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v1, v0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v6, v5
-; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v2, v0, v[6:7]
-; GFX9-NEXT:    v_mov_b32_e32 v5, v2
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, v0, v[6:7]
+; GFX9-NEXT:    v_mov_b32_e32 v5, 0
 ; GFX9-NEXT:    s_movk_i32 s4, 0xfc88
-; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[4:5]
-; GFX9-NEXT:    v_sub_u32_e32 v1, v1, v2
+; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, s4, v[4:5]
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX11-LABEL: lshr_mad_i64_4:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_mad_u64_u32 v[3:4], null, v1, v0, 0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_mov_b32_e32 v1, v4
-; GFX11-NEXT:    v_mad_u64_u32 v[5:6], null, v2, v0, v[1:2]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_mov_b32_e32 v4, v5
-; GFX11-NEXT:    v_mad_u64_u32 v[0:1], null, 0xfffffc88, v5, v[3:4]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_sub_nc_u32_e32 v1, v1, v5
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX1100-LABEL: lshr_mad_i64_4:
+; GFX1100:       ; %bb.0:
+; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-NEXT:    v_mad_u64_u32 v[3:4], null, v1, v0, 0
+; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT:    v_dual_mov_b32 v1, v4 :: v_dual_mov_b32 v4, 0
+; GFX1100-NEXT:    v_mad_u64_u32 v[5:6], null, v2, v0, v[1:2]
+; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-NEXT:    v_mad_u64_u32 v[0:1], null, 0xfffffc88, v5, v[3:4]
+; GFX1100-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1150-LABEL: lshr_mad_i64_4:
+; GFX1150:       ; %bb.0:
+; GFX1150-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1150-NEXT:    v_mad_u64_u32 v[3:4], null, v1, v0, 0
+; GFX1150-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1150-NEXT:    v_dual_mov_b32 v1, v4 :: v_dual_mov_b32 v4, 0
+; GFX1150-NEXT:    v_mad_u64_u32 v[0:1], null, v2, v0, v[1:2]
+; GFX1150-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1150-NEXT:    v_mad_u64_u32 v[0:1], null, 0xfffffc88, v0, v[3:4]
+; GFX1150-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX12-LABEL: lshr_mad_i64_4:
 ; GFX12:       ; %bb.0:
@@ -1569,13 +1597,10 @@ define i64 @lshr_mad_i64_4(i32 %arg0, i64 %arg1) #0 {
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    v_mad_co_u64_u32 v[3:4], null, v1, v0, 0
 ; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT:    v_mov_b32_e32 v1, v4
-; GFX12-NEXT:    v_mad_co_u64_u32 v[5:6], null, v2, v0, v[1:2]
-; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT:    v_mov_b32_e32 v4, v5
-; GFX12-NEXT:    v_mad_co_u64_u32 v[0:1], null, 0xfffffc88, v5, v[3:4]
+; GFX12-NEXT:    v_dual_mov_b32 v1, v4 :: v_dual_mov_b32 v4, 0
+; GFX12-NEXT:    v_mad_co_u64_u32 v[0:1], null, v2, v0, v[1:2]
 ; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT:    v_sub_nc_u32_e32 v1, v1, v5
+; GFX12-NEXT:    v_mad_co_u64_u32 v[0:1], null, 0xfffffc88, v0, v[3:4]
 ; GFX12-NEXT:    s_setpc_b64 s[30:31]
   %ext = zext i32 %arg0 to i64
   %mul1 = mul i64 %arg1, %ext
@@ -1862,10 +1887,9 @@ define amdgpu_ps i64 @lshr_mad_i64_sgpr(i64 inreg %arg0) #0 {
 ; CI-LABEL: lshr_mad_i64_sgpr:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    v_mov_b32_e32 v0, s0
+; CI-NEXT:    v_mov_b32_e32 v1, 0
 ; CI-NEXT:    v_mov_b32_e32 v2, 0xffff1c18
-; CI-NEXT:    v_mov_b32_e32 v1, s1
-; CI-NEXT:    v_mad_u64_u32 v[0:1], s[2:3], s1, v2, v[0:1]
-; CI-NEXT:    v_subrev_i32_e32 v1, vcc, s1, v1
+; CI-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s1, v2, v[0:1]
 ; CI-NEXT:    v_readfirstlane_b32 s0, v0
 ; CI-NEXT:    v_readfirstlane_b32 s1, v1
 ; CI-NEXT:    ; return to shader part epilog
@@ -1920,14 +1944,16 @@ define <2 x i64> @lshr_mad_i64_vec(<2 x i64> %arg0) #0 {
 ; CI-LABEL: lshr_mad_i64_vec:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT:    v_mov_b32_e32 v6, v3
+; CI-NEXT:    v_mov_b32_e32 v3, v1
+; CI-NEXT:    v_mov_b32_e32 v1, 0
 ; CI-NEXT:    s_mov_b32 s4, 0xffff1c18
-; CI-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[0:1]
+; CI-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v3, s4, v[0:1]
+; CI-NEXT:    v_mov_b32_e32 v3, v1
 ; CI-NEXT:    s_mov_b32 s4, 0xffff1118
-; CI-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], v3, s4, v[2:3]
-; CI-NEXT:    v_sub_i32_e32 v1, vcc, v5, v1
-; CI-NEXT:    v_sub_i32_e32 v3, vcc, v7, v3
+; CI-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v6, s4, v[2:3]
 ; CI-NEXT:    v_mov_b32_e32 v0, v4
-; CI-NEXT:    v_mov_b32_e32 v2, v6
+; CI-NEXT:    v_mov_b32_e32 v1, v5
 ; CI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; SI-LABEL: lshr_mad_i64_vec:
@@ -1950,28 +1976,44 @@ define <2 x i64> @lshr_mad_i64_vec(<2 x i64> %arg0) #0 {
 ; GFX9-LABEL: lshr_mad_i64_vec:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v3
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX9-NEXT:    s_mov_b32 s4, 0xffff1c18
-; GFX9-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[0:1]
+; GFX9-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v3, s4, v[0:1]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
 ; GFX9-NEXT:    s_mov_b32 s4, 0xffff1118
-; GFX9-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], v3, s4, v[2:3]
-; GFX9-NEXT:    v_sub_u32_e32 v1, v5, v1
-; GFX9-NEXT:    v_sub_u32_e32 v3, v7, v3
+; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v6, s4, v[2:3]
 ; GFX9-NEXT:    v_mov_b32_e32 v0, v4
-; GFX9-NEXT:    v_mov_b32_e32 v2, v6
+; GFX9-NEXT:    v_mov_b32_e32 v1, v5
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX11-LABEL: lshr_mad_i64_vec:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_mad_u64_u32 v[4:5], null, 0xffff1c18, v1, v[0:1]
-; GFX11-NEXT:    v_mad_u64_u32 v[6:7], null, 0xffff1118, v3, v[2:3]
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_sub_nc_u32_e32 v1, v5, v1
-; GFX11-NEXT:    v_mov_b32_e32 v0, v4
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_sub_nc_u32_e32 v3, v7, v3
-; GFX11-NEXT:    v_mov_b32_e32 v2, v6
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX1100-LABEL: lshr_mad_i64_vec:
+; GFX1100:       ; %bb.0:
+; GFX1100-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-NEXT:    v_mov_b32_e32 v8, v3
+; GFX1100-NEXT:    v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v1, 0
+; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT:    v_mad_u64_u32 v[4:5], null, 0xffff1c18, v6, v[0:1]
+; GFX1100-NEXT:    v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v0, v4
+; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT:    v_mad_u64_u32 v[6:7], null, 0xffff1118, v8, v[2:3]
+; GFX1100-NEXT:    v_dual_mov_b32 v1, v5 :: v_dual_mov_b32 v2, v6
+; GFX1100-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1100-NEXT:    v_mov_b32_e32 v3, v7
+; GFX1100-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1150-LABEL: lshr_mad_i64_vec:
+; GFX1150:       ; %bb.0:
+; GFX1150-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1150-NEXT:    v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v5, v1
+; GFX1150-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1150-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1150-NEXT:    v_mov_b32_e32 v3, v1
+; GFX1150-NEXT:    v_mad_u64_u32 v[0:1], null, 0xffff1c18, v5, v[0:1]
+; GFX1150-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1150-NEXT:    v_mad_u64_u32 v[2:3], null, 0xffff1118, v4, v[2:3]
+; GFX1150-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX12-LABEL: lshr_mad_i64_vec:
 ; GFX12:       ; %bb.0:
@@ -1980,14 +2022,13 @@ define <2 x i64> @lshr_mad_i64_vec(<2 x i64> %arg0) #0 {
 ; GFX12-NEXT:    s_wait_samplecnt 0x0
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
-; GFX12-NEXT:    v_mad_co_u64_u32 v[4:5], null, 0xffff1c18, v1, v[0:1]
-; GFX12-NEXT:    v_mad_co_u64_u32 v[6:7], null, 0xffff1118, v3, v[2:3]
-; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-NEXT:    v_sub_nc_u32_e32 v1, v5, v1
-; GFX12-NEXT:    v_mov_b32_e32 v0, v4
-; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-NEXT:    v_sub_nc_u32_e32 v3, v7, v3
-; GFX12-NEXT:    v_mov_b32_e32 v2, v6
+; GFX12-NEXT:    v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v5, v1
+; GFX12-NEXT:    v_mov_b32_e32 v1, 0
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT:    v_mov_b32_e32 v3, v1
+; GFX12-NEXT:    v_mad_co_u64_u32 v[0:1], null, 0xffff1c18, v5, v[0:1]
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT:    v_mad_co_u64_u32 v[2:3], null, 0xffff1118, v4, v[2:3]
 ; GFX12-NEXT:    s_setpc_b64 s[30:31]
   %lsh = lshr <2 x i64> %arg0, <i64 32, i64 32>
   %mul = mul <2 x i64> %lsh, <i64 s0xffffffffffff1c18, i64 s0xffffffffffff1118>

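The regenerated assertions above (including the split of the shared GFX11 prefix into GFX1100 and GFX1150) follow the usual autogenerated-check workflow; presumably an invocation along the lines of:

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AMDGPU/mad_64_32.ll

with the exact FileCheck prefixes coming from the RUN lines of the test file, which this hunk does not show.
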
From 0f4ea1a7ec0a8ca2280444c604567452e8ecc0fb Mon Sep 17 00:00:00 2001
From: vikhegde <vikram.hegde at amd.com>
Date: Mon, 6 Jan 2025 11:18:49 +0530
Subject: [PATCH 5/5] review comment

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d49852652ff73f..07b6b73cf6c93c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -13877,12 +13877,11 @@ static SDValue tryFoldMADwithSRL(SelectionDAG &DAG, const SDLoc &SL,
   if (ShiftVal->getAsZExtVal() != 32)
     return SDValue();
 
-  APInt Const = dyn_cast<ConstantSDNode>(MulRHS.getNode())->getAPIntValue();
-  if (!Const.isNegative() || !Const.isSignedIntN(33))
+  uint64_t Const = dyn_cast<ConstantSDNode>(MulRHS.getNode())->getZExtValue();
+  if (Hi_32(Const) != -1)
     return SDValue();
 
-  SDValue ConstMul =
-      DAG.getConstant(Const.getZExtValue() & 0x00000000FFFFFFFF, SL, MVT::i32);
+  SDValue ConstMul = DAG.getConstant(Const & 0x00000000FFFFFFFF, SL, MVT::i32);
   return getMad64_32(DAG, SL, MVT::i64,
                      DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulLHS), ConstMul,
                      DAG.getZeroExtendInReg(AddRHS, SL, MVT::i32), false);

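For readability, here is tryFoldMADwithSRL as it reads with all five patches applied, reconstructed from the diffs above (the PR branch itself is authoritative):

  // Fold
  //     y = lshr i64 x, 32
  //     res = add (mul i64 y, Const), x   where "Const" is a 64-bit constant
  //     with Const.hi == -1
  // To
  //     res = mad_u64_u32 y.lo, Const.lo, x.lo
  static SDValue tryFoldMADwithSRL(SelectionDAG &DAG, const SDLoc &SL,
                                   SDValue MulLHS, SDValue MulRHS,
                                   SDValue AddRHS) {
    if (MulLHS.getValueType() != MVT::i64 || MulLHS.getOpcode() != ISD::SRL)
      return SDValue();

    ConstantSDNode *ShiftVal = dyn_cast<ConstantSDNode>(MulLHS.getOperand(1));
    if (!ShiftVal || MulLHS.getOperand(0) != AddRHS)
      return SDValue();

    if (ShiftVal->getAsZExtVal() != 32)
      return SDValue();

    uint64_t Const = dyn_cast<ConstantSDNode>(MulRHS.getNode())->getZExtValue();
    if (Hi_32(Const) != -1)
      return SDValue();

    SDValue ConstMul = DAG.getConstant(Const & 0x00000000FFFFFFFF, SL, MVT::i32);
    return getMad64_32(DAG, SL, MVT::i64,
                       DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, MulLHS), ConstMul,
                       DAG.getZeroExtendInReg(AddRHS, SL, MVT::i32), false);
  }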

