[llvm] r334127 - [InstCombine] PR37603: low bit mask canonicalization

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 6 12:38:27 PDT 2018


Author: lebedevri
Date: Wed Jun  6 12:38:27 2018
New Revision: 334127

URL: http://llvm.org/viewvc/llvm-project?rev=334127&view=rev
Log:
[InstCombine] PR37603: low bit mask canonicalization

Summary:
This is [[ https://bugs.llvm.org/show_bug.cgi?id=37603 | PR37603 ]].

https://godbolt.org/g/VCMNpS
https://rise4fun.com/Alive/idM

When doing bit manipulation, it is quite common to compute some bit mask
and then apply it to a value via `and`.

The typical C code looks like:
```
int mask_signed_add(int nbits) {
    return (1 << nbits) - 1;
}
```
which (with `-O3`) is translated into:
```
define dso_local i32 @mask_signed_add(i32) local_unnamed_addr #0 {
  %2 = shl i32 1, %0
  %3 = add nsw i32 %2, -1
  ret i32 %3
}
```

But there is a second, less readable variant:
```
int mask_signed_xor(int nbits) {
    return ~(-(1 << nbits));
}
```
which (with `-O3`) is translated into:
```
define dso_local i32 @mask_signed_xor(i32) local_unnamed_addr #0 {
  %2 = shl i32 -1, %0
  %3 = xor i32 %2, -1
  ret i32 %3
}
```

Since we created such a mask, it is quite likely that we will use it in an `and` next,
and then we may be able to get rid of the `not` op by folding it into `andn`.
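To illustrate the `andn` point, here is a small hypothetical consumer (illustrative only; the
function name is made up for this example, it is not code from the patch): with the canonical
`x & ~(-1 << nbits)` form, a BMI-capable x86 backend can compute `-1 << nbits` and then fold
the `not` plus the `and` into a single `andn`, instead of first materializing the
`(1 << nbits) - 1` mask.
```
#include <stdint.h>

/* Hypothetical example (not from the patch): keep the low `nbits` bits of x.
   Assumes 0 <= nbits < 32. After this canonicalization the IR is
   `and x, (xor (shl -1, nbits), -1)`, i.e. x & ~(-1 << nbits),
   which maps naturally onto BMI `andn`. */
uint32_t keep_low_bits(uint32_t x, unsigned nbits) {
    uint32_t notmask = ~0u << nbits; /* low nbits clear, high bits set */
    return x & ~notmask;
}
```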

But now that I have actually looked:
https://godbolt.org/g/VTUDmU
_some_ backend changes will be needed too.
We clearly lose `bzhi` recognition.
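For context on the `bzhi` concern, a sketch of the kind of source pattern involved (again
hypothetical and not the backend's actual pattern-matching code): with BMI2, the old
`x & ((1 << n) - 1)` form, i.e. `and x, (add (shl 1, n), -1)`, is selected to a single `bzhi`;
once the `add ..., -1` is rewritten to shl/xor, that match presumably no longer fires until
the backend learns the new form.
```
#include <stdint.h>

/* Hypothetical example (not from the patch): "zero the high bits of x
   starting at bit n". Assumes 0 <= n < 32. Compiled with -mbmi2, the
   pre-canonicalization IR for this could be selected to a single bzhi. */
uint32_t zero_high_bits_from(uint32_t x, unsigned n) {
    return x & ((1u << n) - 1);
}
```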

Reviewers: spatel, craig.topper, RKSimon

Reviewed By: spatel

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D47428

Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
    llvm/trunk/test/Transforms/InstCombine/rem.ll
    llvm/trunk/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp?rev=334127&r1=334126&r2=334127&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp Wed Jun  6 12:38:27 2018
@@ -1096,6 +1096,30 @@ Value *InstCombiner::SimplifyAddWithRema
   return nullptr;
 }
 
+/// Fold
+///   (1 << NBits) - 1
+/// Into:
+///   ~(-(1 << NBits))
+/// Because a 'not' is better for bit-tracking analysis and other transforms
+/// than an 'add'. The new shl is always nsw, and is nuw if the old `add` was.
+static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
+                                           InstCombiner::BuilderTy &Builder) {
+  Value *NBits;
+  if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
+    return nullptr;
+
+  Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
+  Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
+  // Be wary of constant folding.
+  if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
+    // Always NSW. But NUW propagates from `add`.
+    BOp->setHasNoSignedWrap();
+    BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
+  }
+
+  return BinaryOperator::CreateNot(NotMask, I.getName());
+}
+
 Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
   bool Changed = SimplifyAssociativeOrCommutative(I);
   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
@@ -1347,6 +1371,9 @@ Instruction *InstCombiner::visitAdd(Bina
     I.setHasNoUnsignedWrap(true);
   }
 
+  if (Instruction *V = canonicalizeLowbitMask(I, Builder))
+    return V;
+
   return Changed ? &I : nullptr;
 }
 

Modified: llvm/trunk/test/Transforms/InstCombine/rem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/rem.ll?rev=334127&r1=334126&r2=334127&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/rem.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/rem.ll Wed Jun  6 12:38:27 2018
@@ -314,8 +314,8 @@ define i64 @test14(i64 %x, i32 %y) {
 
 define i64 @test15(i32 %x, i32 %y) {
 ; CHECK-LABEL: @test15(
-; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[Y:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[SHL]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
 ; CHECK-NEXT:    [[UREM:%.*]] = zext i32 [[TMP2]] to i64
 ; CHECK-NEXT:    ret i64 [[UREM]]

Modified: llvm/trunk/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll?rev=334127&r1=334126&r2=334127&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll Wed Jun  6 12:38:27 2018
@@ -17,8 +17,8 @@
 
 define i32 @shl_add(i32 %NBits) {
 ; CHECK-LABEL: @shl_add(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl i32 1, %NBits
@@ -28,8 +28,8 @@ define i32 @shl_add(i32 %NBits) {
 
 define i32 @shl_add_nsw(i32 %NBits) {
 ; CHECK-LABEL: @shl_add_nsw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nsw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl i32 1, %NBits
@@ -39,8 +39,8 @@ define i32 @shl_add_nsw(i32 %NBits) {
 
 define i32 @shl_add_nuw(i32 %NBits) {
 ; CHECK-LABEL: @shl_add_nuw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nuw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nuw nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl i32 1, %NBits
@@ -50,8 +50,8 @@ define i32 @shl_add_nuw(i32 %NBits) {
 
 define i32 @shl_add_nsw_nuw(i32 %NBits) {
 ; CHECK-LABEL: @shl_add_nsw_nuw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nuw nsw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nuw nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl i32 1, %NBits
@@ -63,8 +63,8 @@ define i32 @shl_add_nsw_nuw(i32 %NBits)
 
 define i32 @shl_nsw_add(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_add(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nsw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nsw i32 1, %NBits
@@ -74,8 +74,8 @@ define i32 @shl_nsw_add(i32 %NBits) {
 
 define i32 @shl_nsw_add_nsw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_add_nsw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nsw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nsw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nsw i32 1, %NBits
@@ -85,8 +85,8 @@ define i32 @shl_nsw_add_nsw(i32 %NBits)
 
 define i32 @shl_nsw_add_nuw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_add_nuw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nsw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nuw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nuw nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nsw i32 1, %NBits
@@ -96,8 +96,8 @@ define i32 @shl_nsw_add_nuw(i32 %NBits)
 
 define i32 @shl_nsw_add_nsw_nuw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_add_nsw_nuw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nsw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nuw nsw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nuw nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nsw i32 1, %NBits
@@ -109,8 +109,8 @@ define i32 @shl_nsw_add_nsw_nuw(i32 %NBi
 
 define i32 @shl_nuw_add(i32 %NBits) {
 ; CHECK-LABEL: @shl_nuw_add(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nuw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw i32 1, %NBits
@@ -120,8 +120,8 @@ define i32 @shl_nuw_add(i32 %NBits) {
 
 define i32 @shl_nuw_add_nsw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nuw_add_nsw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nuw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nsw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw i32 1, %NBits
@@ -131,8 +131,8 @@ define i32 @shl_nuw_add_nsw(i32 %NBits)
 
 define i32 @shl_nuw_add_nuw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nuw_add_nuw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nuw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nuw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nuw nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw i32 1, %NBits
@@ -142,8 +142,8 @@ define i32 @shl_nuw_add_nuw(i32 %NBits)
 
 define i32 @shl_nuw_add_nsw_nuw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nuw_add_nsw_nuw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nuw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nuw nsw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nuw nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw i32 1, %NBits
@@ -155,8 +155,8 @@ define i32 @shl_nuw_add_nsw_nuw(i32 %NBi
 
 define i32 @shl_nsw_nuw_add(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_nuw_add(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nuw nsw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw nsw i32 1, %NBits
@@ -166,8 +166,8 @@ define i32 @shl_nsw_nuw_add(i32 %NBits)
 
 define i32 @shl_nsw_nuw_add_nsw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_nuw_add_nsw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nuw nsw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nsw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw nsw i32 1, %NBits
@@ -177,8 +177,8 @@ define i32 @shl_nsw_nuw_add_nsw(i32 %NBi
 
 define i32 @shl_nsw_nuw_add_nuw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_nuw_add_nuw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nuw nsw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nuw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nuw nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw nsw i32 1, %NBits
@@ -188,8 +188,8 @@ define i32 @shl_nsw_nuw_add_nuw(i32 %NBi
 
 define i32 @shl_nsw_nuw_add_nsw_nuw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_nuw_add_nsw_nuw(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl nuw nsw i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add nuw nsw i32 [[SETBIT]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nuw nsw i32 -1, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw nsw i32 1, %NBits
@@ -203,8 +203,8 @@ define i32 @shl_nsw_nuw_add_nsw_nuw(i32
 
 define <2 x i32> @shl_add_vec(<2 x i32> %NBits) {
 ; CHECK-LABEL: @shl_add_vec(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl <2 x i32> <i32 1, i32 1>, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add <2 x i32> [[SETBIT]], <i32 -1, i32 -1>
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <2 x i32> <i32 -1, i32 -1>, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor <2 x i32> [[NOTMASK]], <i32 -1, i32 -1>
 ; CHECK-NEXT:    ret <2 x i32> [[RET]]
 ;
   %setbit = shl <2 x i32> <i32 1, i32 1>, %NBits
@@ -214,8 +214,8 @@ define <2 x i32> @shl_add_vec(<2 x i32>
 
 define <3 x i32> @shl_add_vec_undef0(<3 x i32> %NBits) {
 ; CHECK-LABEL: @shl_add_vec_undef0(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl <3 x i32> <i32 1, i32 undef, i32 1>, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add <3 x i32> [[SETBIT]], <i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], <i32 -1, i32 -1, i32 -1>
 ; CHECK-NEXT:    ret <3 x i32> [[RET]]
 ;
   %setbit = shl <3 x i32> <i32 1, i32 undef, i32 1>, %NBits
@@ -225,8 +225,8 @@ define <3 x i32> @shl_add_vec_undef0(<3
 
 define <3 x i32> @shl_add_vec_undef1(<3 x i32> %NBits) {
 ; CHECK-LABEL: @shl_add_vec_undef1(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl <3 x i32> <i32 1, i32 1, i32 1>, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add <3 x i32> [[SETBIT]], <i32 -1, i32 undef, i32 -1>
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], <i32 -1, i32 -1, i32 -1>
 ; CHECK-NEXT:    ret <3 x i32> [[RET]]
 ;
   %setbit = shl <3 x i32> <i32 1, i32 1, i32 1>, %NBits
@@ -236,8 +236,8 @@ define <3 x i32> @shl_add_vec_undef1(<3
 
 define <3 x i32> @shl_add_vec_undef2(<3 x i32> %NBits) {
 ; CHECK-LABEL: @shl_add_vec_undef2(
-; CHECK-NEXT:    [[SETBIT:%.*]] = shl <3 x i32> <i32 1, i32 undef, i32 1>, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = add <3 x i32> [[SETBIT]], <i32 -1, i32 undef, i32 -1>
+; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], <i32 -1, i32 -1, i32 -1>
 ; CHECK-NEXT:    ret <3 x i32> [[RET]]
 ;
   %setbit = shl <3 x i32> <i32 1, i32 undef, i32 1>, %NBits
