[llvm] [InstCombine] Fold negation of unsigned div of non-negatives (PR #84951)

Antonio Frighetto via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 14 04:58:08 PDT 2024


https://github.com/antoniofrighetto updated https://github.com/llvm/llvm-project/pull/84951

>From dbe66945b767e81f44c21c024285707efeb60ca4 Mon Sep 17 00:00:00 2001
From: Antonio Frighetto <me at antoniofrighetto.com>
Date: Tue, 12 Mar 2024 17:27:18 +0100
Subject: [PATCH] [InstCombine] Fold negation of unsigned div of non-negatives

Let InstCombine carry out the following fold:
`sub 0, (udiv X, C)` -> `sdiv X, -C`, when both `X` and `C` are known to be non-negative.

Proofs: https://alive2.llvm.org/ce/z/T9zPod.
---
 .../InstCombine/InstCombineAddSub.cpp         |  26 +++-
 llvm/test/Transforms/InstCombine/sub.ll       | 127 ++++++++++++++++++
 2 files changed, 152 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index aaf7184a5562cd..e71655aefda50e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -2062,6 +2062,26 @@ static Instruction *foldSubOfMinMax(BinaryOperator &I,
   return nullptr;
 }
 
+/// Fold `sub 0, (udiv nneg X, nneg C)` into `sdiv nneg X, -C`
+static Value *foldNegationOfUDivOfNonNegatives(BinaryOperator &I,
+                                               InstCombinerImpl &IC) {
+  Value *RHS = I.getOperand(1);
+  Value *X;
+  Constant *C;
+
+  const auto &SQ = IC.getSimplifyQuery().getWithInstruction(&I);
+  if (match(RHS, m_OneUse(m_UDiv(m_Value(X), m_Constant(C)))) &&
+      isKnownNonNegative(X, SQ) && isKnownNonNegative(C, SQ)) {
+    if ((isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) &&
+        !C->getSplatValue())
+      return nullptr;
+    return IC.Builder.CreateSDiv(X, ConstantExpr::getNeg(C), "",
+                                 cast<Instruction>(RHS)->isExact());
+  }
+
+  return nullptr;
+}
+
 Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   if (Value *V = simplifySubInst(I.getOperand(0), I.getOperand(1),
                                  I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
@@ -2153,8 +2173,12 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
                                         Op1, *this))
       return BinaryOperator::CreateAdd(NegOp1, Op0);
   }
-  if (IsNegation)
+  if (IsNegation) {
+    if (Value *Res = foldNegationOfUDivOfNonNegatives(I, *this))
+      return replaceInstUsesWith(I, Res);
+
     return TryToNarrowDeduceFlags(); // Should have been handled in Negator!
+  }
 
   // (A*B)-(A*C) -> A*(B-C) etc
   if (Value *V = foldUsingDistributiveLaws(I))
diff --git a/llvm/test/Transforms/InstCombine/sub.ll b/llvm/test/Transforms/InstCombine/sub.ll
index 249b5673c8acfd..8d9da78bcb9b9a 100644
--- a/llvm/test/Transforms/InstCombine/sub.ll
+++ b/llvm/test/Transforms/InstCombine/sub.ll
@@ -2626,3 +2626,130 @@ define i8 @sub_of_adds_2xc(i8 %x, i8 %y) {
   %r = sub i8 %xc, %yc
   ret i8 %r
 }
+
+define i8 @test_neg_of_udiv_of_nonnegs(i8 %a) {
+; CHECK-LABEL: @test_neg_of_udiv_of_nonnegs(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[COND1:%.*]] = icmp sgt i8 [[A:%.*]], -1
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[NEG:%.*]] = sdiv i8 [[A]], -3
+; CHECK-NEXT:    ret i8 [[NEG]]
+;
+entry:
+  %cond1 = icmp sgt i8 %a, -1
+  call void @llvm.assume(i1 %cond1)
+  %div = udiv i8 %a, 3
+  %neg = sub i8 0, %div
+  ret i8 %neg
+}
+
+define i8 @test_neg_of_udiv_of_nonnegs_nsw(i8 %a) {
+; CHECK-LABEL: @test_neg_of_udiv_of_nonnegs_nsw(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[COND1:%.*]] = icmp sgt i8 [[A:%.*]], -1
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[NEG:%.*]] = sdiv i8 [[A]], -3
+; CHECK-NEXT:    ret i8 [[NEG]]
+;
+entry:
+  %cond1 = icmp sgt i8 %a, -1
+  call void @llvm.assume(i1 %cond1)
+  %div = udiv i8 %a, 3
+  %neg = sub nsw i8 0, %div
+  ret i8 %neg
+}
+
+define i8 @test_neg_of_udiv_of_nonnegs_exact_nsw(i8 %a) {
+; CHECK-LABEL: @test_neg_of_udiv_of_nonnegs_exact_nsw(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[COND1:%.*]] = icmp sgt i8 [[A:%.*]], -1
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[NEG:%.*]] = sdiv exact i8 [[A]], -3
+; CHECK-NEXT:    ret i8 [[NEG]]
+;
+entry:
+  %cond1 = icmp sgt i8 %a, -1
+  call void @llvm.assume(i1 %cond1)
+  %div = udiv exact i8 %a, 3
+  %neg = sub nsw i8 0, %div
+  ret i8 %neg
+}
+
+define i8 @test_neg_of_udiv_of_nonnegs_sdiv_exact(i8 %a) {
+; CHECK-LABEL: @test_neg_of_udiv_of_nonnegs_sdiv_exact(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[COND1:%.*]] = icmp sgt i8 [[A:%.*]], -1
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[NEG:%.*]] = sdiv exact i8 [[A]], -20
+; CHECK-NEXT:    ret i8 [[NEG]]
+;
+entry:
+  %cond1 = icmp sgt i8 %a, -1
+  call void @llvm.assume(i1 %cond1)
+  %div = sdiv exact i8 %a, 20
+  %neg = sub i8 0, %div
+  ret i8 %neg
+}
+
+define i8 @test_neg_of_udiv_of_nonnegs_negative_rhs(i8 %a) {
+; CHECK-LABEL: @test_neg_of_udiv_of_nonnegs_negative_rhs(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[COND1:%.*]] = icmp sgt i8 [[A:%.*]], -1
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    ret i8 0
+;
+entry:
+  %cond1 = icmp sgt i8 %a, -1
+  call void @llvm.assume(i1 %cond1)
+  %div = udiv i8 %a, -3
+  %neg = sub nsw i8 0, %div
+  ret i8 %neg
+}
+
+define i8 @test_neg_of_udiv_of_nonnegs_negative_lhs(i8 %a) {
+; CHECK-LABEL: @test_neg_of_udiv_of_nonnegs_negative_lhs(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[COND1:%.*]] = icmp sgt i8 [[A:%.*]], -2
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND1]])
+; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[A]], 3
+; CHECK-NEXT:    [[NEG:%.*]] = sub nsw i8 0, [[DIV]]
+; CHECK-NEXT:    ret i8 [[NEG]]
+;
+entry:
+  %cond1 = icmp sge i8 %a, -1
+  call void @llvm.assume(i1 %cond1)
+  %div = udiv i8 %a, 3
+  %neg = sub nsw i8 0, %div
+  ret i8 %neg
+}
+
+define <4 x i8> @test_neg_of_udiv_of_nonnegs_vec_splat(<4 x i8> %a) {
+; CHECK-LABEL: @test_neg_of_udiv_of_nonnegs_vec_splat(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[MASK:%.*]] = and <4 x i8> [[A:%.*]], <i8 127, i8 127, i8 127, i8 127>
+; CHECK-NEXT:    [[NEG:%.*]] = sdiv <4 x i8> [[MASK]], <i8 -3, i8 -3, i8 -3, i8 -3>
+; CHECK-NEXT:    ret <4 x i8> [[NEG]]
+;
+entry:
+  %mask = and <4 x i8> %a, <i8 127, i8 127, i8 127, i8 127>
+  %div = udiv <4 x i8> %mask, <i8 3, i8 3, i8 3, i8 3>
+  %neg = sub <4 x i8> zeroinitializer, %div
+  ret <4 x i8> %neg
+}
+
+define <4 x i8> @test_neg_of_udiv_of_nonnegs_vec_nonsplat(<4 x i8> %a) {
+; CHECK-LABEL: @test_neg_of_udiv_of_nonnegs_vec_nonsplat(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[MASK:%.*]] = and <4 x i8> [[A:%.*]], <i8 127, i8 127, i8 127, i8 127>
+; CHECK-NEXT:    [[DIV:%.*]] = udiv <4 x i8> [[MASK]], <i8 1, i8 2, i8 3, i8 4>
+; CHECK-NEXT:    [[NEG:%.*]] = sub nsw <4 x i8> zeroinitializer, [[DIV]]
+; CHECK-NEXT:    ret <4 x i8> [[NEG]]
+;
+entry:
+  %mask = and <4 x i8> %a, <i8 127, i8 127, i8 127, i8 127>
+  %div = udiv <4 x i8> %mask, <i8 1, i8 2, i8 3, i8 4>
+  %neg = sub <4 x i8> zeroinitializer, %div
+  ret <4 x i8> %neg
+}
+
+declare void @llvm.assume(i1)



More information about the llvm-commits mailing list