[llvm] [InstCombine] Avoid unprofitable add with remainder transform (PR #147319)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 7 08:18:22 PDT 2025
https://github.com/nikic created https://github.com/llvm/llvm-project/pull/147319
If C1 is 1, this will end up replacing the remainder with a multiply and a longer dependency chain. This is clearly unprofitable in the case where the remainder is an `and`, but the profitability is also questionable for `urem` (because udiv and urem are usually produced by the same hardware instruction), so both cases have been disabled.
Fixes https://github.com/llvm/llvm-project/issues/147176.
From f526aae9a42ebbc64ba506caf240b40caed3fca6 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Mon, 7 Jul 2025 17:07:48 +0200
Subject: [PATCH 1/2] Add additional tests for add with remainder fold
---
llvm/test/Transforms/InstCombine/add4.ll | 86 ++++++++++++++++++++++++
1 file changed, 86 insertions(+)
diff --git a/llvm/test/Transforms/InstCombine/add4.ll b/llvm/test/Transforms/InstCombine/add4.ll
index 0e97deb4d98ad..8fed197954e91 100644
--- a/llvm/test/Transforms/InstCombine/add4.ll
+++ b/llvm/test/Transforms/InstCombine/add4.ll
@@ -289,3 +289,89 @@ entry:
%add = add i32 %shl, %rem
ret i32 %add
}
+
+define i32 @fold_add_udiv_urem_no_mul(i32 noundef %val) {
+; CHECK-LABEL: @fold_add_udiv_urem_no_mul(
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[VAL:%.*]], 10
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], -9
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP1]], [[VAL]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+ %div = udiv i32 %val, 10
+ %rem = urem i32 %val, 10
+ %add = add i32 %div, %rem
+ ret i32 %add
+}
+
+define i32 @fold_add_udiv_urem_rem_mul(i32 noundef %val) {
+; CHECK-LABEL: @fold_add_udiv_urem_rem_mul(
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[VAL:%.*]], 10
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[VAL]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[DIV]], -29
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+ %div = udiv i32 %val, 10
+ %rem = urem i32 %val, 10
+ %mul = mul i32 %rem, 3
+ %add = add i32 %div, %mul
+ ret i32 %add
+}
+
+define i32 @fold_add_udiv_urem_pow2_no_mul(i32 noundef %arg) {
+; CHECK-LABEL: @fold_add_udiv_urem_pow2_no_mul(
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[ARG:%.*]], 4
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[LSHR]], -15
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP1]], [[ARG]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+ %lshr = lshr i32 %arg, 4
+ %and = and i32 %arg, 15
+ %add = add i32 %lshr, %and
+ ret i32 %add
+}
+
+define i32 @fold_add_udiv_urem_pow2_div_mul(i32 noundef %arg) {
+; CHECK-LABEL: @fold_add_udiv_urem_pow2_div_mul(
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[ARG:%.*]], 4
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[LSHR]], -13
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP1]], [[ARG]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+ %lshr = lshr i32 %arg, 4
+ %mul = mul i32 %lshr, 3
+ %and = and i32 %arg, 15
+ %add = add i32 %mul, %and
+ ret i32 %add
+}
+
+define i32 @fold_add_udiv_urem_pow2_rem_mul(i32 noundef %arg) {
+; CHECK-LABEL: @fold_add_udiv_urem_pow2_rem_mul(
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[ARG:%.*]], 4
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[ARG]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[LSHR]], -47
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+ %lshr = lshr i32 %arg, 4
+ %and = and i32 %arg, 15
+ %mul = mul i32 %and, 3
+ %add = add i32 %lshr, %mul
+ ret i32 %add
+}
+
+define i32 @fold_add_udiv_urem_pow2_both_mul(i32 noundef %arg) {
+; CHECK-LABEL: @fold_add_udiv_urem_pow2_both_mul(
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[ARG:%.*]], 4
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[ARG]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[LSHR]], -41
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+ %lshr = lshr i32 %arg, 4
+ %mul1 = mul i32 %lshr, 7
+ %and = and i32 %arg, 15
+ %mul2 = mul i32 %and, 3
+ %add = add i32 %mul1, %mul2
+ ret i32 %add
+}
From 9b937ddd285b509ec52693a6ed5a9a9edfdee74a Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Mon, 7 Jul 2025 17:12:31 +0200
Subject: [PATCH 2/2] Don't perform transform for C1 == 1
---
.../InstCombine/InstCombineAddSub.cpp | 4 +++-
llvm/test/Transforms/InstCombine/add4.ll | 20 +++++++++----------
2 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 1ba548b6ff062..1ad0a9c488e80 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1193,8 +1193,10 @@ Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) {
}
Value *DivOpV;
APInt DivOpC;
+ // The transform is valid for C1==1, but not profitable.
if (MatchRem(Rem, X, C0, IsSigned) &&
- MatchDiv(Div, DivOpV, DivOpC, IsSigned) && X == DivOpV && C0 == DivOpC) {
+ MatchDiv(Div, DivOpV, DivOpC, IsSigned) && X == DivOpV && C0 == DivOpC &&
+ !C1.isOne()) {
APInt NewC = C1 - C2 * C0;
if (!NewC.isZero() && !Rem->hasOneUse())
return nullptr;
diff --git a/llvm/test/Transforms/InstCombine/add4.ll b/llvm/test/Transforms/InstCombine/add4.ll
index 8fed197954e91..f766ccf651aa2 100644
--- a/llvm/test/Transforms/InstCombine/add4.ll
+++ b/llvm/test/Transforms/InstCombine/add4.ll
@@ -293,8 +293,8 @@ entry:
define i32 @fold_add_udiv_urem_no_mul(i32 noundef %val) {
; CHECK-LABEL: @fold_add_udiv_urem_no_mul(
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[VAL:%.*]], 10
-; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], -9
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP1]], [[VAL]]
+; CHECK-NEXT: [[REM:%.*]] = urem i32 [[VAL]], 10
+; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[DIV]], [[REM]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%div = udiv i32 %val, 10
@@ -306,9 +306,9 @@ define i32 @fold_add_udiv_urem_no_mul(i32 noundef %val) {
define i32 @fold_add_udiv_urem_rem_mul(i32 noundef %val) {
; CHECK-LABEL: @fold_add_udiv_urem_rem_mul(
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[VAL:%.*]], 10
-; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[VAL]], 3
-; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[DIV]], -29
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[REM:%.*]] = urem i32 [[VAL]], 10
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[REM]], 3
+; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[DIV]], [[MUL]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%div = udiv i32 %val, 10
@@ -321,8 +321,8 @@ define i32 @fold_add_udiv_urem_rem_mul(i32 noundef %val) {
define i32 @fold_add_udiv_urem_pow2_no_mul(i32 noundef %arg) {
; CHECK-LABEL: @fold_add_udiv_urem_pow2_no_mul(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[ARG:%.*]], 4
-; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[LSHR]], -15
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP1]], [[ARG]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[ARG]], 15
+; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[LSHR]], [[AND]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%lshr = lshr i32 %arg, 4
@@ -348,9 +348,9 @@ define i32 @fold_add_udiv_urem_pow2_div_mul(i32 noundef %arg) {
define i32 @fold_add_udiv_urem_pow2_rem_mul(i32 noundef %arg) {
; CHECK-LABEL: @fold_add_udiv_urem_pow2_rem_mul(
; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[ARG:%.*]], 4
-; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[ARG]], 3
-; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[LSHR]], -47
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[ARG]], 15
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 3
+; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[LSHR]], [[MUL]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%lshr = lshr i32 %arg, 4
More information about the llvm-commits
mailing list