[llvm] 2460786 - [InstCombine] fold inc-of-signbit-splat to not+lshr

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 29 05:50:13 PDT 2022


Author: Sanjay Patel
Date: 2022-08-29T08:48:22-04:00
New Revision: 246078604c871d43394407db7f9f953494f45570

URL: https://github.com/llvm/llvm-project/commit/246078604c871d43394407db7f9f953494f45570
DIFF: https://github.com/llvm/llvm-project/commit/246078604c871d43394407db7f9f953494f45570.diff

LOG: [InstCombine] fold inc-of-signbit-splat to not+lshr

(iN X s>> (N - 1)) + 1 --> (~X) u>> (N - 1)

https://alive2.llvm.org/ce/z/wzS474

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
    llvm/test/Transforms/InstCombine/add.ll
    llvm/test/Transforms/InstCombine/high-bit-signmask.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index b02405c067f55..8a49943f52bfb 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -849,6 +849,7 @@ static Instruction *foldNoWrapAdd(BinaryOperator &Add,
 
 Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
   Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
+  Type *Ty = Add.getType();
   Constant *Op1C;
   if (!match(Op1, m_ImmConstant(Op1C)))
     return nullptr;
@@ -883,7 +884,14 @@ Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
   if (match(Op0, m_Not(m_Value(X))))
     return BinaryOperator::CreateSub(InstCombiner::SubOne(Op1C), X);
 
+  // (iN X s>> (N - 1)) + 1 --> (~X) u>> (N - 1)
   const APInt *C;
+  if (match(Op0, m_OneUse(m_AShr(m_Value(X), m_APIntAllowUndef(C)))) &&
+      *C == (Ty->getScalarSizeInBits() - 1) && match(Op1, m_One())) {
+    Value *NotX = Builder.CreateNot(X, X->getName() + ".not");
+    return BinaryOperator::CreateLShr(NotX, ConstantInt::get(Ty, *C));
+  }
+
   if (!match(Op1, m_APInt(C)))
     return nullptr;
 
@@ -911,7 +919,6 @@ Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
 
   // Is this add the last step in a convoluted sext?
   // add(zext(xor i16 X, -32768), -32768) --> sext X
-  Type *Ty = Add.getType();
   if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
       C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
     return CastInst::Create(Instruction::SExt, X, Ty);

diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index bc27117b368b2..58d97c971db7e 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -1900,8 +1900,8 @@ define i8 @not_mul_use2(i8 %x) {
 
 define i8 @full_ashr_inc(i8 %x) {
 ; CHECK-LABEL: @full_ashr_inc(
-; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
-; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A]], 1
+; CHECK-NEXT:    [[X_NOT:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = lshr i8 [[X_NOT]], 7
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %a = ashr i8 %x, 7
@@ -1911,8 +1911,8 @@ define i8 @full_ashr_inc(i8 %x) {
 
 define <2 x i6> @full_ashr_inc_vec(<2 x i6> %x) {
 ; CHECK-LABEL: @full_ashr_inc_vec(
-; CHECK-NEXT:    [[A:%.*]] = ashr <2 x i6> [[X:%.*]], <i6 5, i6 poison>
-; CHECK-NEXT:    [[R:%.*]] = add <2 x i6> [[A]], <i6 1, i6 1>
+; CHECK-NEXT:    [[X_NOT:%.*]] = xor <2 x i6> [[X:%.*]], <i6 -1, i6 -1>
+; CHECK-NEXT:    [[R:%.*]] = lshr <2 x i6> [[X_NOT]], <i6 5, i6 5>
 ; CHECK-NEXT:    ret <2 x i6> [[R]]
 ;
   %a = ashr <2 x i6> %x, <i6 5, i6 poison>
@@ -1920,6 +1920,8 @@ define <2 x i6> @full_ashr_inc_vec(<2 x i6> %x) {
   ret <2 x i6> %r
 }
 
+; negative test - extra use
+
 define i8 @full_ashr_inc_use(i8 %x) {
 ; CHECK-LABEL: @full_ashr_inc_use(
 ; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
@@ -1933,6 +1935,8 @@ define i8 @full_ashr_inc_use(i8 %x) {
   ret i8 %r
 }
 
+; negative test - wrong shift amount
+
 define i8 @not_full_ashr_inc(i8 %x) {
 ; CHECK-LABEL: @not_full_ashr_inc(
 ; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 6
@@ -1944,6 +1948,8 @@ define i8 @not_full_ashr_inc(i8 %x) {
   ret i8 %r
 }
 
+; negative test - wrong add amount
+
 define i8 @full_ashr_not_inc(i8 %x) {
 ; CHECK-LABEL: @full_ashr_not_inc(
 ; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7

diff --git a/llvm/test/Transforms/InstCombine/high-bit-signmask.ll b/llvm/test/Transforms/InstCombine/high-bit-signmask.ll
index 8621ebcf14cae..a202c43fbb5c4 100644
--- a/llvm/test/Transforms/InstCombine/high-bit-signmask.ll
+++ b/llvm/test/Transforms/InstCombine/high-bit-signmask.ll
@@ -116,8 +116,8 @@ define i64 @n9(i64 %x) {
 
 define i64 @n10(i64 %x) {
 ; CHECK-LABEL: @n10(
-; CHECK-NEXT:    [[T0_NEG:%.*]] = ashr i64 [[X:%.*]], 63
-; CHECK-NEXT:    [[R:%.*]] = add nsw i64 [[T0_NEG]], 1
+; CHECK-NEXT:    [[X_NOT:%.*]] = xor i64 [[X:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = lshr i64 [[X_NOT]], 63
 ; CHECK-NEXT:    ret i64 [[R]]
 ;
   %t0 = lshr i64 %x, 63


        


More information about the llvm-commits mailing list