[llvm] Reapply 130577 narrow math for and operand (PR #133896)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 10 20:21:00 PDT 2025
https://github.com/Shoreshen updated https://github.com/llvm/llvm-project/pull/133896
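The change teaches AMDGPUCodeGenPrepare to narrow 64-bit add and mul to a smaller legal width (32 bits in the tests below) when KnownBits on the two operands proves the result fits, rewriting the wide op as trunc / narrow op / zext. A minimal IR sketch of the pattern it targets, with a hypothetical function name mirroring the narrow_add test added in this patch:

define i64 @narrow_add_sketch(i64 %a, i64 %b) {
  ; both operands are masked to 31 bits, so the 64-bit add cannot exceed 32 bits
  %zext0 = and i64 %a, 2147483647
  %zext1 = and i64 %b, 2147483647
  %add = add i64 %zext0, %zext1
  ret i64 %add
}

; After the pass the add should come out roughly as:
;   %t0 = trunc i64 %zext0 to i32
;   %t1 = trunc i64 %zext1 to i32
;   %narrow = add i32 %t0, %t1
;   %add = zext i32 %narrow to i64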
>From 10bb8da1eed6060b37f61b6598ea59c9f1baec21 Mon Sep 17 00:00:00 2001
From: Shoreshen <372660931 at qq.com>
Date: Tue, 1 Apr 2025 17:52:44 +0800
Subject: [PATCH 1/5] Revert "Revert "[AMDGPU][CodeGenPrepare] Narrow 64 bit math to 32 bit if prof…"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This reverts commit 7f14b2a9eb4792155ed31da7bc16cc58cbb1b0fc.
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 84 +++++++
.../AMDGPU/amdgpu-codegenprepare-mul24.ll | 5 +-
.../atomic_optimizations_global_pointer.ll | 52 ++--
.../CodeGen/AMDGPU/narrow_math_for_and.ll | 231 ++++++++++++++++++
llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll | 9 +-
5 files changed, 347 insertions(+), 34 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 9c482aeb3ea5c..eb5c160670992 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1561,6 +1561,87 @@ void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
llvm_unreachable("not a division");
}
+Type *findSmallestLegalBits(Instruction *I, int OrigBit, int MaxBitsNeeded,
+ const TargetLowering *TLI, const DataLayout &DL) {
+ if (MaxBitsNeeded >= OrigBit)
+ return nullptr;
+
+ Type *NewType = I->getType()->getWithNewBitWidth(MaxBitsNeeded);
+ while (OrigBit > MaxBitsNeeded) {
+ if (TLI->isOperationLegalOrCustom(
+ TLI->InstructionOpcodeToISD(I->getOpcode()),
+ TLI->getValueType(DL, NewType, true)))
+ return NewType;
+
+ MaxBitsNeeded *= 2;
+ NewType = I->getType()->getWithNewBitWidth(MaxBitsNeeded);
+ }
+ return nullptr;
+}
+
+static bool tryNarrowMathIfNoOverflow(Instruction *I, const TargetLowering *TLI,
+ const TargetTransformInfo &TTI,
+ const DataLayout &DL) {
+ unsigned Opc = I->getOpcode();
+ Type *OldType = I->getType();
+
+ if (Opc != Instruction::Add && Opc != Instruction::Mul)
+ return false;
+
+ unsigned OrigBit = OldType->getScalarSizeInBits();
+ unsigned MaxBitsNeeded = OrigBit;
+
+ switch (Opc) {
+ case Instruction::Add:
+ MaxBitsNeeded = KnownBits::add(computeKnownBits(I->getOperand(0), DL),
+ computeKnownBits(I->getOperand(1), DL))
+ .countMaxActiveBits();
+ break;
+ case Instruction::Mul:
+ MaxBitsNeeded = KnownBits::mul(computeKnownBits(I->getOperand(0), DL),
+ computeKnownBits(I->getOperand(1), DL))
+ .countMaxActiveBits();
+ break;
+ default:
+ llvm_unreachable("Unexpected opcode, only valid for Instruction::Add and "
+ "Instruction::Mul.");
+ }
+
+ MaxBitsNeeded = std::max<unsigned>(bit_ceil(MaxBitsNeeded), 8);
+ Type *NewType = findSmallestLegalBits(I, OrigBit, MaxBitsNeeded, TLI, DL);
+
+ if (!NewType)
+ return false;
+
+ // Old cost
+ InstructionCost OldCost =
+ TTI.getArithmeticInstrCost(Opc, OldType, TTI::TCK_RecipThroughput);
+ // New cost of new op
+ InstructionCost NewCost =
+ TTI.getArithmeticInstrCost(Opc, NewType, TTI::TCK_RecipThroughput);
+ // New cost of narrowing 2 operands (use trunc)
+ NewCost += 2 * TTI.getCastInstrCost(Instruction::Trunc, NewType, OldType,
+ TTI.getCastContextHint(I),
+ TTI::TCK_RecipThroughput);
+ // New cost of zext narrowed result to original type
+ NewCost +=
+ TTI.getCastInstrCost(Instruction::ZExt, OldType, NewType,
+ TTI.getCastContextHint(I), TTI::TCK_RecipThroughput);
+ if (NewCost >= OldCost)
+ return false;
+
+ IRBuilder<> Builder(I);
+ Value *Trunc0 = Builder.CreateTrunc(I->getOperand(0), NewType);
+ Value *Trunc1 = Builder.CreateTrunc(I->getOperand(1), NewType);
+ Value *Arith =
+ Builder.CreateBinOp((Instruction::BinaryOps)Opc, Trunc0, Trunc1);
+
+ Value *Zext = Builder.CreateZExt(Arith, OldType);
+ I->replaceAllUsesWith(Zext);
+ I->eraseFromParent();
+ return true;
+}
+
bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
if (foldBinOpIntoSelect(I))
return true;
@@ -1645,6 +1726,9 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
}
}
+ Changed = tryNarrowMathIfNoOverflow(&I, ST.getTargetLowering(),
+ TM.getTargetTransformInfo(F), DL);
+
return Changed;
}
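tryNarrowMathIfNoOverflow only fires when KnownBits::add/mul over the operands' known bits shows (via countMaxActiveBits) that the full result fits in a narrower legal type, and the TTI cost of trunc + narrow op + zext beats the original op. A sketch of a case it must skip, using a hypothetical name that mirrors the no_narrow_add test added below; both masks allow 0x80000000, so the sum can need 33 bits:

define i64 @no_narrow_sketch(i64 %a, i64 %b) {
  ; 0x80000000 + 0x80000000 = 0x100000000 needs 33 bits,
  ; so the i64 add must be left untouched
  %m0 = and i64 %a, 2147483648
  %m1 = and i64 %b, 2147483648
  %add = add i64 %m0, %m1
  ret i64 %add
}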
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-mul24.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-mul24.ll
index 296b817bc8f75..d7c35a8b007c6 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-mul24.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-mul24.ll
@@ -414,7 +414,10 @@ define i64 @umul24_i64_2(i64 %lhs, i64 %rhs) {
; DISABLED-LABEL: @umul24_i64_2(
; DISABLED-NEXT: [[LHS24:%.*]] = and i64 [[LHS:%.*]], 65535
; DISABLED-NEXT: [[RHS24:%.*]] = and i64 [[RHS:%.*]], 65535
-; DISABLED-NEXT: [[MUL:%.*]] = mul i64 [[LHS24]], [[RHS24]]
+; DISABLED-NEXT: [[TMP1:%.*]] = trunc i64 [[LHS24]] to i32
+; DISABLED-NEXT: [[TMP2:%.*]] = trunc i64 [[RHS24]] to i32
+; DISABLED-NEXT: [[TMP3:%.*]] = mul i32 [[TMP1]], [[TMP2]]
+; DISABLED-NEXT: [[MUL:%.*]] = zext i32 [[TMP3]] to i64
; DISABLED-NEXT: ret i64 [[MUL]]
;
%lhs24 = and i64 %lhs, 65535
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index 62083b3e67ab6..e2dfcf55b7856 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -1823,22 +1823,22 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1264: ; %bb.0: ; %entry
; GFX1264-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1264-NEXT: s_mov_b64 s[6:7], exec
-; GFX1264-NEXT: s_mov_b32 s9, 0
-; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX1264-NEXT: s_mov_b64 s[4:5], exec
+; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s7, v0
; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1264-NEXT: s_cbranch_execz .LBB3_2
; GFX1264-NEXT: ; %bb.1:
-; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
+; GFX1264-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1264-NEXT: v_mov_b32_e32 v1, 0
+; GFX1264-NEXT: s_wait_alu 0xfffe
+; GFX1264-NEXT: s_mul_i32 s6, s6, 5
; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1264-NEXT: s_mul_u64 s[6:7], s[8:9], 5
-; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_wait_alu 0xfffe
; GFX1264-NEXT: v_mov_b32_e32 v0, s6
-; GFX1264-NEXT: v_mov_b32_e32 v1, s7
+; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_wait_kmcnt 0x0
; GFX1264-NEXT: s_mov_b32 s8, s2
; GFX1264-NEXT: s_mov_b32 s9, s3
@@ -1860,20 +1860,19 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-LABEL: add_i64_constant:
; GFX1232: ; %bb.0: ; %entry
; GFX1232-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1232-NEXT: s_mov_b32 s7, exec_lo
-; GFX1232-NEXT: s_mov_b32 s5, 0
-; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0
; GFX1232-NEXT: s_mov_b32 s6, exec_lo
+; GFX1232-NEXT: s_mov_b32 s4, exec_lo
+; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1232-NEXT: s_cbranch_execz .LBB3_2
; GFX1232-NEXT: ; %bb.1:
-; GFX1232-NEXT: s_bcnt1_i32_b32 s4, s7
+; GFX1232-NEXT: s_bcnt1_i32_b32 s5, s6
; GFX1232-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1232-NEXT: s_mul_u64 s[4:5], s[4:5], 5
+; GFX1232-NEXT: s_mul_i32 s5, s5, 5
; GFX1232-NEXT: s_mov_b32 s10, -1
-; GFX1232-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1232-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, 0
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: s_mov_b32 s8, s2
; GFX1232-NEXT: s_mov_b32 s9, s3
@@ -1881,8 +1880,7 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-NEXT: s_wait_loadcnt 0x0
; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB3_2:
-; GFX1232-NEXT: s_wait_alu 0xfffe
-; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s6
+; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: v_readfirstlane_b32 s3, v1
; GFX1232-NEXT: v_readfirstlane_b32 s2, v0
@@ -5372,22 +5370,22 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1264: ; %bb.0: ; %entry
; GFX1264-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1264-NEXT: s_mov_b64 s[6:7], exec
-; GFX1264-NEXT: s_mov_b32 s9, 0
-; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX1264-NEXT: s_mov_b64 s[4:5], exec
+; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s7, v0
; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1264-NEXT: s_cbranch_execz .LBB9_2
; GFX1264-NEXT: ; %bb.1:
-; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
+; GFX1264-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
+; GFX1264-NEXT: v_mov_b32_e32 v1, 0
+; GFX1264-NEXT: s_wait_alu 0xfffe
+; GFX1264-NEXT: s_mul_i32 s6, s6, 5
; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1264-NEXT: s_mul_u64 s[6:7], s[8:9], 5
-; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_wait_alu 0xfffe
; GFX1264-NEXT: v_mov_b32_e32 v0, s6
-; GFX1264-NEXT: v_mov_b32_e32 v1, s7
+; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_wait_kmcnt 0x0
; GFX1264-NEXT: s_mov_b32 s8, s2
; GFX1264-NEXT: s_mov_b32 s9, s3
@@ -5412,20 +5410,19 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-LABEL: sub_i64_constant:
; GFX1232: ; %bb.0: ; %entry
; GFX1232-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1232-NEXT: s_mov_b32 s7, exec_lo
-; GFX1232-NEXT: s_mov_b32 s5, 0
-; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0
; GFX1232-NEXT: s_mov_b32 s6, exec_lo
+; GFX1232-NEXT: s_mov_b32 s4, exec_lo
+; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1232-NEXT: s_cbranch_execz .LBB9_2
; GFX1232-NEXT: ; %bb.1:
-; GFX1232-NEXT: s_bcnt1_i32_b32 s4, s7
+; GFX1232-NEXT: s_bcnt1_i32_b32 s5, s6
; GFX1232-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1232-NEXT: s_mul_u64 s[4:5], s[4:5], 5
+; GFX1232-NEXT: s_mul_i32 s5, s5, 5
; GFX1232-NEXT: s_mov_b32 s10, -1
-; GFX1232-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX1232-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, 0
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: s_mov_b32 s8, s2
; GFX1232-NEXT: s_mov_b32 s9, s3
@@ -5433,8 +5430,7 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-NEXT: s_wait_loadcnt 0x0
; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB9_2:
-; GFX1232-NEXT: s_wait_alu 0xfffe
-; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s6
+; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: v_readfirstlane_b32 s2, v0
; GFX1232-NEXT: v_mul_u32_u24_e32 v0, 5, v2
diff --git a/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll b/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
new file mode 100644
index 0000000000000..3f49b1e550595
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
@@ -0,0 +1,231 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s
+
+define i64 @narrow_add(i64 %a, i64 %b) {
+; CHECK-LABEL: narrow_add:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; CHECK-NEXT: v_and_b32_e32 v1, 0x7fffffff, v2
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_add_nc_u32 v0, v0, v1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and i64 %a, 2147483647
+ %zext1 = and i64 %b, 2147483647
+ %add = add i64 %zext0, %zext1
+ ret i64 %add
+}
+
+define i64 @narrow_add_1(i64 %a, i64 %b) {
+; CHECK-LABEL: narrow_add_1:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_bfi_b32 v0, 0x7fffffff, v0, v2
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and i64 %a, 2147483647
+ %zext1 = and i64 %b, 2147483648
+ %add = add i64 %zext0, %zext1
+ ret i64 %add
+}
+
+define <2 x i64> @narrow_add_vec(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK-LABEL: narrow_add_vec:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; CHECK-NEXT: v_and_b32_e32 v1, 0x7fffffff, v4
+; CHECK-NEXT: v_and_b32_e32 v2, 30, v2
+; CHECK-NEXT: v_and_b32_e32 v3, 0x7ffffffe, v6
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_add_co_u32 v2, s0, v2, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and <2 x i64> %a, <i64 2147483647, i64 30>
+ %zext1 = and <2 x i64> %b, <i64 2147483647, i64 2147483646>
+ %add = add <2 x i64> %zext0, %zext1
+ ret <2 x i64> %add
+}
+
+define <2 x i32> @narrow_add_vec_1(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-LABEL: narrow_add_vec_1:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v1, 0x3fff, v1
+; CHECK-NEXT: v_and_b32_e32 v0, 0x4000, v0
+; CHECK-NEXT: v_and_b32_e32 v3, 0x4001, v3
+; CHECK-NEXT: v_and_b32_e32 v2, 0x4000, v2
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; CHECK-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; CHECK-NEXT: v_perm_b32 v1, v3, v2, 0x5040100
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_pk_add_u16 v1, v0, v1
+; CHECK-NEXT: v_and_b32_e32 v0, 0xc000, v1
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and <2 x i32> %a, <i32 16384, i32 16383>
+ %zext1 = and <2 x i32> %b, <i32 16384, i32 16385>
+ %add = add <2 x i32> %zext0, %zext1
+ ret <2 x i32> %add
+}
+
+define i64 @narrow_mul(i64 %a, i64 %b) {
+; CHECK-LABEL: narrow_mul:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v1, 2, v2
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_mul_lo_u32 v0, v0, v1
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and i64 %a, 2147483647
+ %zext1 = and i64 %b, 2
+ %mul = mul i64 %zext0, %zext1
+ ret i64 %mul
+}
+
+define i64 @narrow_mul_1(i64 %a, i64 %b) {
+; CHECK-LABEL: narrow_mul_1:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v1, 0xf73594, v0
+; CHECK-NEXT: v_and_b32_e32 v2, 0x100, v2
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_mul_u32_u24_e32 v0, v1, v2
+; CHECK-NEXT: v_mul_hi_u32_u24_e32 v1, v1, v2
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and i64 %a, 16201108
+ %zext1 = and i64 %b, 256
+ %mul = mul i64 %zext0, %zext1
+ ret i64 %mul
+}
+
+define <2 x i64> @narrow_mul_vec(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK-LABEL: narrow_mul_vec:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x2d48aff, v0
+; CHECK-NEXT: v_and_b32_e32 v1, 0x50, v4
+; CHECK-NEXT: v_and_b32_e32 v3, 50, v2
+; CHECK-NEXT: v_and_b32_e32 v4, 20, v6
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; CHECK-NEXT: v_mul_lo_u32 v0, v0, v1
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: v_mul_u32_u24_e32 v2, v3, v4
+; CHECK-NEXT: v_mul_hi_u32_u24_e32 v3, v3, v4
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and <2 x i64> %a, <i64 47483647, i64 50>
+ %zext1 = and <2 x i64> %b, <i64 80, i64 20>
+ %mul = mul <2 x i64> %zext0, %zext1
+ ret <2 x i64> %mul
+}
+
+define <2 x i32> @narrow_add_mul_1(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-LABEL: narrow_add_mul_1:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v1, 0x4000, v1
+; CHECK-NEXT: v_and_b32_e32 v0, 0x4000, v0
+; CHECK-NEXT: v_and_b32_e32 v2, 3, v2
+; CHECK-NEXT: v_and_b32_e32 v3, 2, v3
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; CHECK-NEXT: v_mul_u32_u24_e32 v0, v0, v2
+; CHECK-NEXT: v_mul_u32_u24_e32 v1, v1, v3
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and <2 x i32> %a, <i32 16384, i32 16384>
+ %zext1 = and <2 x i32> %b, <i32 3, i32 2>
+ %mul = mul <2 x i32> %zext0, %zext1
+ ret <2 x i32> %mul
+}
+
+define i64 @no_narrow_add(i64 %a, i64 %b) {
+; CHECK-LABEL: no_narrow_add:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x80000000, v0
+; CHECK-NEXT: v_and_b32_e32 v1, 0x80000000, v2
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and i64 %a, 2147483648
+ %zext1 = and i64 %b, 2147483648
+ %add = add i64 %zext0, %zext1
+ ret i64 %add
+}
+
+define i64 @no_narrow_add_1(i64 %a, i64 %b) {
+; CHECK-LABEL: no_narrow_add_1:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v1, 1, v2
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and i64 %a, 4294967295
+ %zext1 = and i64 %b, 1
+ %add = add i64 %zext0, %zext1
+ ret i64 %add
+}
+
+define <2 x i64> @no_narrow_add_vec(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK-LABEL: no_narrow_add_vec:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x80000000, v0
+; CHECK-NEXT: v_and_b32_e32 v1, 0x80000000, v4
+; CHECK-NEXT: v_and_b32_e32 v2, 30, v2
+; CHECK-NEXT: v_and_b32_e32 v3, 0x7ffffffe, v6
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_add_co_u32 v2, s0, v2, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and <2 x i64> %a, <i64 2147483648, i64 30>
+ %zext1 = and <2 x i64> %b, <i64 2147483648, i64 2147483646>
+ %add = add <2 x i64> %zext0, %zext1
+ ret <2 x i64> %add
+}
+
+define i64 @no_narrow_mul(i64 %a, i64 %b) {
+; CHECK-LABEL: no_narrow_mul:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x80000000, v0
+; CHECK-NEXT: v_and_b32_e32 v1, 2, v2
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and i64 %a, 2147483648
+ %zext1 = and i64 %b, 2
+ %mul = mul i64 %zext0, %zext1
+ ret i64 %mul
+}
+
+define <2 x i64> @no_narrow_mul_vec(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK-LABEL: no_narrow_mul_vec:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v1, 0x8000, v0
+; CHECK-NEXT: v_and_b32_e32 v3, 0x20000, v4
+; CHECK-NEXT: v_and_b32_e32 v4, 50, v2
+; CHECK-NEXT: v_and_b32_e32 v5, 20, v6
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; CHECK-NEXT: v_mul_u32_u24_e32 v0, v1, v3
+; CHECK-NEXT: v_mul_hi_u32_u24_e32 v1, v1, v3
+; CHECK-NEXT: v_mul_u32_u24_e32 v2, v4, v5
+; CHECK-NEXT: v_mul_hi_u32_u24_e32 v3, v4, v5
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %zext0 = and <2 x i64> %a, <i64 32768, i64 50>
+ %zext1 = and <2 x i64> %b, <i64 131072, i64 20>
+ %mul = mul <2 x i64> %zext0, %zext1
+ ret <2 x i64> %mul
+}
diff --git a/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll b/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll
index 4290590e99711..4eb7761bfbddd 100644
--- a/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll
+++ b/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll
@@ -508,17 +508,16 @@ define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(ptr addrspace(4) %
; SI-LABEL: widen_i1_zext_to_i64_constant_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[0:1], 0x0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_and_b32 s2, s2, 1
-; SI-NEXT: s_add_u32 s4, s2, 0x3e7
-; SI-NEXT: s_addc_u32 s5, 0, 0
-; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: s_and_b32 s2, s2, 0xff
+; SI-NEXT: s_addk_i32 s2, 0x3e7
+; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
>From 69271c0337d3c84d12d52e9faaacd28bbabb5499 Mon Sep 17 00:00:00 2001
From: shore <372660931 at qq.com>
Date: Tue, 1 Apr 2025 19:05:13 +0800
Subject: [PATCH 2/5] fix address sanitizer failure
---
llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index eb5c160670992..b0914c17b9827 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1652,6 +1652,9 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
if (UseMul24Intrin && replaceMulWithMul24(I))
return true;
+ if (tryNarrowMathIfNoOverflow(&I, ST.getTargetLowering(),
+ TM.getTargetTransformInfo(F), DL))
+ return true;
bool Changed = false;
Instruction::BinaryOps Opc = I.getOpcode();
@@ -1726,9 +1729,6 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
}
}
- Changed = tryNarrowMathIfNoOverflow(&I, ST.getTargetLowering(),
- TM.getTargetTransformInfo(F), DL);
-
return Changed;
}
>From a937f5bb721211b1e7caff92d8cd10834c1db8fa Mon Sep 17 00:00:00 2001
From: shore <372660931 at qq.com>
Date: Wed, 2 Apr 2025 13:54:48 +0800
Subject: [PATCH 3/5] fix comments
---
llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index b0914c17b9827..234f0ce0f1101 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1562,15 +1562,15 @@ void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
}
Type *findSmallestLegalBits(Instruction *I, int OrigBit, int MaxBitsNeeded,
- const TargetLowering *TLI, const DataLayout &DL) {
+ const SITargetLowering *TLI, const DataLayout &DL) {
if (MaxBitsNeeded >= OrigBit)
return nullptr;
Type *NewType = I->getType()->getWithNewBitWidth(MaxBitsNeeded);
+ unsigned ISDOpc = TLI->InstructionOpcodeToISD(I->getOpcode());
while (OrigBit > MaxBitsNeeded) {
- if (TLI->isOperationLegalOrCustom(
- TLI->InstructionOpcodeToISD(I->getOpcode()),
- TLI->getValueType(DL, NewType, true)))
+ if (TLI->isOperationLegalOrCustom(ISDOpc,
+ TLI->getValueType(DL, NewType, true)))
return NewType;
MaxBitsNeeded *= 2;
@@ -1579,7 +1579,8 @@ Type *findSmallestLegalBits(Instruction *I, int OrigBit, int MaxBitsNeeded,
return nullptr;
}
-static bool tryNarrowMathIfNoOverflow(Instruction *I, const TargetLowering *TLI,
+static bool tryNarrowMathIfNoOverflow(Instruction *I,
+ const SITargetLowering *TLI,
const TargetTransformInfo &TTI,
const DataLayout &DL) {
unsigned Opc = I->getOpcode();
>From 80695a052429c04f63c3d11a907b1e1510feb253 Mon Sep 17 00:00:00 2001
From: shore <372660931 at qq.com>
Date: Mon, 7 Apr 2025 13:34:00 +0800
Subject: [PATCH 4/5] remove isd related type check
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 4 +---
.../CodeGen/AMDGPU/narrow_math_for_and.ll | 22 +++++++++----------
2 files changed, 11 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 234f0ce0f1101..126f990e557f6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1567,10 +1567,8 @@ Type *findSmallestLegalBits(Instruction *I, int OrigBit, int MaxBitsNeeded,
return nullptr;
Type *NewType = I->getType()->getWithNewBitWidth(MaxBitsNeeded);
- unsigned ISDOpc = TLI->InstructionOpcodeToISD(I->getOpcode());
while (OrigBit > MaxBitsNeeded) {
- if (TLI->isOperationLegalOrCustom(ISDOpc,
- TLI->getValueType(DL, NewType, true)))
+ if (TLI->isTypeLegal(TLI->getValueType(DL, NewType, true)))
return NewType;
MaxBitsNeeded *= 2;
diff --git a/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll b/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
index 3f49b1e550595..ea406a3717f60 100644
--- a/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
+++ b/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
@@ -34,16 +34,14 @@ define <2 x i64> @narrow_add_vec(<2 x i64> %a, <2 x i64> %b) #0 {
; CHECK-LABEL: narrow_add_vec:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v1, 30, v2
; CHECK-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
-; CHECK-NEXT: v_and_b32_e32 v1, 0x7fffffff, v4
-; CHECK-NEXT: v_and_b32_e32 v2, 30, v2
+; CHECK-NEXT: v_and_b32_e32 v2, 0x7fffffff, v4
; CHECK-NEXT: v_and_b32_e32 v3, 0x7ffffffe, v6
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-NEXT: v_add_co_u32 v2, s0, v2, v3
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s0
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; CHECK-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; CHECK-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_add_nc_u32 v2, v1, v3
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and <2 x i64> %a, <i64 2147483647, i64 30>
%zext1 = and <2 x i64> %b, <i64 2147483647, i64 2147483646>
@@ -110,13 +108,13 @@ define <2 x i64> @narrow_mul_vec(<2 x i64> %a, <2 x i64> %b) #0 {
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v0, 0x2d48aff, v0
; CHECK-NEXT: v_and_b32_e32 v1, 0x50, v4
-; CHECK-NEXT: v_and_b32_e32 v3, 50, v2
-; CHECK-NEXT: v_and_b32_e32 v4, 20, v6
+; CHECK-NEXT: v_and_b32_e32 v2, 50, v2
+; CHECK-NEXT: v_and_b32_e32 v3, 20, v6
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; CHECK-NEXT: v_mul_lo_u32 v0, v0, v1
; CHECK-NEXT: v_mov_b32_e32 v1, 0
-; CHECK-NEXT: v_mul_u32_u24_e32 v2, v3, v4
-; CHECK-NEXT: v_mul_hi_u32_u24_e32 v3, v3, v4
+; CHECK-NEXT: v_mul_u32_u24_e32 v2, v2, v3
+; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and <2 x i64> %a, <i64 47483647, i64 50>
%zext1 = and <2 x i64> %b, <i64 80, i64 20>
>From 8d87d7cdf84c01743a37289133ef893a03451a67 Mon Sep 17 00:00:00 2001
From: shore <372660931 at qq.com>
Date: Tue, 8 Apr 2025 13:41:55 +0800
Subject: [PATCH 5/5] fix comments
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 36 +++++++++----------
.../CodeGen/AMDGPU/memcpy-crash-issue63986.ll | 10 +++---
.../CodeGen/AMDGPU/narrow_math_for_and.ll | 12 +++----
llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll | 16 ++++-----
4 files changed, 34 insertions(+), 40 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index afabb5573ce58..a8b86fc2d7954 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1559,22 +1559,18 @@ void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
llvm_unreachable("not a division");
}
-Type *findSmallestLegalBits(Instruction *I, int OrigBit, int MaxBitsNeeded,
- const SITargetLowering *TLI, const DataLayout &DL) {
- if (MaxBitsNeeded >= OrigBit)
- return nullptr;
-
- Type *NewType = I->getType()->getWithNewBitWidth(MaxBitsNeeded);
- while (OrigBit > MaxBitsNeeded) {
- if (TLI->isTypeLegal(TLI->getValueType(DL, NewType, true)))
- return NewType;
-
- MaxBitsNeeded *= 2;
- NewType = I->getType()->getWithNewBitWidth(MaxBitsNeeded);
- }
- return nullptr;
-}
-
+/*
+This can cause an inconsistency for non-byte-sized loads, for example:
+```
+ %load = load i1, ptr addrspace(4) %arg, align 4
+ %zext = zext i1 %load to i64
+ %add = add i64 %zext
+```
+Instead of creating `s_and_b32 s0, s0, 1`,
+it will create `s_and_b32 s0, s0, 0xff`.
+We accept this change since the non-byte load assumes the upper bits
+within the byte are all 0.
+*/
static bool tryNarrowMathIfNoOverflow(Instruction *I,
const SITargetLowering *TLI,
const TargetTransformInfo &TTI,
@@ -1605,10 +1601,14 @@ static bool tryNarrowMathIfNoOverflow(Instruction *I,
}
MaxBitsNeeded = std::max<unsigned>(bit_ceil(MaxBitsNeeded), 8);
- Type *NewType = findSmallestLegalBits(I, OrigBit, MaxBitsNeeded, TLI, DL);
-
+ Type *NewType =
+ DL.getSmallestLegalIntType(I->getType()->getContext(), MaxBitsNeeded);
if (!NewType)
return false;
+ unsigned NewBit = NewType->getIntegerBitWidth();
+ if (NewBit >= OrigBit)
+ return false;
+ NewType = I->getType()->getWithNewBitWidth(NewBit);
// Old cost
InstructionCost OldCost =
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
index 8157b1a7f7c80..5d88f58a55a8a 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
@@ -162,13 +162,13 @@ define void @issue63986_reduced_expanded(i64 %idxprom) {
; CHECK-NEXT: s_cbranch_execnz .LBB1_8
; CHECK-NEXT: .LBB1_5: ; %loop-memcpy-residual.preheader
; CHECK-NEXT: v_mov_b32_e32 v0, s4
-; CHECK-NEXT: s_mov_b64 s[6:7], 0
+; CHECK-NEXT: s_mov_b64 s[8:9], 0
+; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: .LBB1_6: ; %loop-memcpy-residual
-; CHECK-NEXT: s_add_u32 s4, s6, 1
-; CHECK-NEXT: s_addc_u32 s5, s7, 0
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
-; CHECK-NEXT: s_mov_b64 s[6:7], 1
+; CHECK-NEXT: s_add_i32 s6, s8, 1
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
+; CHECK-NEXT: s_mov_b64 s[8:9], 1
; CHECK-NEXT: s_cbranch_vccnz .LBB1_6
; CHECK-NEXT: ; %bb.7: ; %Flow
; CHECK-NEXT: v_mov_b32_e32 v0, 0
diff --git a/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll b/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
index ea406a3717f60..151456e82ae51 100644
--- a/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
+++ b/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll
@@ -55,15 +55,11 @@ define <2 x i32> @narrow_add_vec_1(<2 x i32> %a, <2 x i32> %b) #0 {
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v1, 0x3fff, v1
; CHECK-NEXT: v_and_b32_e32 v0, 0x4000, v0
-; CHECK-NEXT: v_and_b32_e32 v3, 0x4001, v3
; CHECK-NEXT: v_and_b32_e32 v2, 0x4000, v2
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; CHECK-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; CHECK-NEXT: v_perm_b32 v1, v3, v2, 0x5040100
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-NEXT: v_pk_add_u16 v1, v0, v1
-; CHECK-NEXT: v_and_b32_e32 v0, 0xc000, v1
-; CHECK-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; CHECK-NEXT: v_and_b32_e32 v3, 0x4001, v3
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; CHECK-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; CHECK-NEXT: v_add_nc_u32_e32 v1, v1, v3
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and <2 x i32> %a, <i32 16384, i32 16383>
%zext1 = and <2 x i32> %b, <i32 16384, i32 16385>
diff --git a/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll b/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll
index 4eb7761bfbddd..bac70b69650cd 100644
--- a/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll
+++ b/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll
@@ -526,13 +526,12 @@ define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(ptr addrspace(4) %
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_and_b32 s0, s0, 1
-; VI-NEXT: s_add_u32 s0, s0, 0x3e7
-; VI-NEXT: s_addc_u32 s1, 0, 0
-; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: s_and_b32 s0, s0, 0xff
+; VI-NEXT: s_addk_i32 s0, 0x3e7
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
@@ -540,14 +539,13 @@ define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(ptr addrspace(4) %
; GFX11-LABEL: widen_i1_zext_to_i64_constant_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_and_b32 s0, s0, 1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_add_u32 s0, s0, 0x3e7
-; GFX11-NEXT: s_addc_u32 s1, 0, 0
-; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-NEXT: s_and_b32 s0, s0, 0xff
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_addk_i32 s0, 0x3e7
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: s_endpgm