[llvm] Change how low masks are canonicalized (PR #97978)

via llvm-commits llvm-commits at lists.llvm.org
Sun Jul 7 17:27:59 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-transforms

Author: AtariDreams (AtariDreams)

<details>
<summary>Changes</summary>

Change how InstCombine canonicalizes low-bit masks: instead of emitting `not (shl -1, NBits)` (i.e. `shl nsw i8 -1, %n` followed by `xor ..., -1`), emit `lshr -1, (BitWidth-1 - NBits)` (a `sub` of the shift amount from `BitWidth-1`, then an all-ones `lshr`). Test CHECK lines are updated accordingly.

---
Full diff: https://github.com/llvm/llvm-project/pull/97978.diff


8 Files Affected:

- (modified) llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp (+5-8) 
- (modified) llvm/test/Transforms/InstCombine/and-add-shl.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/get-lowbitmask-upto-and-including-bit.ll (+15-15) 
- (modified) llvm/test/Transforms/InstCombine/mul.ll (+6-5) 
- (modified) llvm/test/Transforms/InstCombine/rem-mul-shl.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/rem.ll (+8-8) 
- (modified) llvm/test/Transforms/InstCombine/select-divrem.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll (+24-24) 


``````````diff
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 0a73c58c07409..7771d03a9aec9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1212,15 +1212,12 @@ static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
     return nullptr;
 
   Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
-  Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
-  // Be wary of constant folding.
-  if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
-    // Always NSW. But NUW propagates from `add`.
-    BOp->setHasNoSignedWrap();
-    BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
-  }
+  Value *Sub = Builder.CreateSub(
+      ConstantInt::get(NBits->getType(),
+                       NBits->getType()->getScalarSizeInBits() - 1),
+      NBits);
 
-  return BinaryOperator::CreateNot(NotMask, I.getName());
+  return BinaryOperator::CreateLShr(MinusOne, Sub, I.getName());
 }
 
 static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
diff --git a/llvm/test/Transforms/InstCombine/and-add-shl.ll b/llvm/test/Transforms/InstCombine/and-add-shl.ll
index 92b3a8144d62c..b80c30928dead 100644
--- a/llvm/test/Transforms/InstCombine/and-add-shl.ll
+++ b/llvm/test/Transforms/InstCombine/and-add-shl.ll
@@ -10,9 +10,9 @@ define i8 @and_add_shl(i8 %x) {
 ; CHECK-SAME: (i8 [[X:%.*]]) {
 ; CHECK-NEXT:    [[OP1_P2:%.*]] = icmp ult i8 [[X]], 6
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[OP1_P2]])
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X]]
-; CHECK-NEXT:    [[SUB:%.*]] = and i8 [[NOTMASK]], 32
-; CHECK-NEXT:    [[R:%.*]] = xor i8 [[SUB]], 32
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], 7
+; CHECK-NEXT:    [[SUB:%.*]] = lshr i8 -1, [[TMP1]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[SUB]], 32
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %op1_p2 = icmp ule i8 %x, 5
@@ -48,9 +48,9 @@ define i8 @and_add_shl_overlap(i8 %x) {
 ; CHECK-SAME: (i8 [[X:%.*]]) {
 ; CHECK-NEXT:    [[OP1_P2:%.*]] = icmp ult i8 [[X]], 7
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[OP1_P2]])
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X]]
-; CHECK-NEXT:    [[SUB:%.*]] = and i8 [[NOTMASK]], 32
-; CHECK-NEXT:    [[R:%.*]] = xor i8 [[SUB]], 32
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], 7
+; CHECK-NEXT:    [[SUB:%.*]] = lshr i8 -1, [[TMP1]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[SUB]], 32
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %op1_p2 = icmp ule i8 %x, 6
diff --git a/llvm/test/Transforms/InstCombine/get-lowbitmask-upto-and-including-bit.ll b/llvm/test/Transforms/InstCombine/get-lowbitmask-upto-and-including-bit.ll
index 40caa57891369..a0ddf3007b9ab 100644
--- a/llvm/test/Transforms/InstCombine/get-lowbitmask-upto-and-including-bit.ll
+++ b/llvm/test/Transforms/InstCombine/get-lowbitmask-upto-and-including-bit.ll
@@ -125,9 +125,9 @@ define i8 @t8_extrause2(i8 %x) {
 define i8 @t9_nocse(i8 %x) {
 ; CHECK-LABEL: @t9_nocse(
 ; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X:%.*]]
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X]]
-; CHECK-NEXT:    [[LOWBITMASK:%.*]] = xor i8 [[NOTMASK]], -1
-; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[BITMASK1]], [[LOWBITMASK]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X]]
+; CHECK-NEXT:    [[LOWBITMASK:%.*]] = lshr i8 -1, [[TMP1]]
+; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[LOWBITMASK]], [[BITMASK1]]
 ; CHECK-NEXT:    ret i8 [[MASK]]
 ;
   %bitmask0 = shl i8 1, %x
@@ -157,9 +157,9 @@ define i8 @t11_nocse_extrause1(i8 %x) {
 ; CHECK-LABEL: @t11_nocse_extrause1(
 ; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[BITMASK1]])
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X]]
-; CHECK-NEXT:    [[LOWBITMASK:%.*]] = xor i8 [[NOTMASK]], -1
-; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[BITMASK1]], [[LOWBITMASK]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X]]
+; CHECK-NEXT:    [[LOWBITMASK:%.*]] = lshr i8 -1, [[TMP1]]
+; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[LOWBITMASK]], [[BITMASK1]]
 ; CHECK-NEXT:    ret i8 [[MASK]]
 ;
   %bitmask0 = shl i8 1, %x
@@ -172,10 +172,10 @@ define i8 @t11_nocse_extrause1(i8 %x) {
 define i8 @t12_nocse_extrause2(i8 %x) {
 ; CHECK-LABEL: @t12_nocse_extrause2(
 ; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X:%.*]]
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X]]
-; CHECK-NEXT:    [[LOWBITMASK:%.*]] = xor i8 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X]]
+; CHECK-NEXT:    [[LOWBITMASK:%.*]] = lshr i8 -1, [[TMP1]]
 ; CHECK-NEXT:    call void @use8(i8 [[LOWBITMASK]])
-; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[BITMASK1]], [[LOWBITMASK]]
+; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[LOWBITMASK]], [[BITMASK1]]
 ; CHECK-NEXT:    ret i8 [[MASK]]
 ;
   %bitmask0 = shl i8 1, %x
@@ -225,10 +225,10 @@ define i8 @t15_nocse_extrause5(i8 %x) {
 ; CHECK-LABEL: @t15_nocse_extrause5(
 ; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[BITMASK1]])
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X]]
-; CHECK-NEXT:    [[LOWBITMASK:%.*]] = xor i8 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X]]
+; CHECK-NEXT:    [[LOWBITMASK:%.*]] = lshr i8 -1, [[TMP1]]
 ; CHECK-NEXT:    call void @use8(i8 [[LOWBITMASK]])
-; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[BITMASK1]], [[LOWBITMASK]]
+; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[LOWBITMASK]], [[BITMASK1]]
 ; CHECK-NEXT:    ret i8 [[MASK]]
 ;
   %bitmask0 = shl i8 1, %x
@@ -264,9 +264,9 @@ define i8 @t16_nocse_extrause6(i8 %x) {
 define i8 @t17_nocse_mismatching_x(i8 %x0, i8 %x1) {
 ; CHECK-LABEL: @t17_nocse_mismatching_x(
 ; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X1:%.*]]
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X0:%.*]]
-; CHECK-NEXT:    [[LOWBITMASK:%.*]] = xor i8 [[NOTMASK]], -1
-; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[BITMASK1]], [[LOWBITMASK]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X0:%.*]]
+; CHECK-NEXT:    [[LOWBITMASK:%.*]] = lshr i8 -1, [[TMP1]]
+; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[LOWBITMASK]], [[BITMASK1]]
 ; CHECK-NEXT:    ret i8 [[MASK]]
 ;
   %bitmask0 = shl i8 1, %x0
diff --git a/llvm/test/Transforms/InstCombine/mul.ll b/llvm/test/Transforms/InstCombine/mul.ll
index 66455479feaaa..208117ebf565a 100644
--- a/llvm/test/Transforms/InstCombine/mul.ll
+++ b/llvm/test/Transforms/InstCombine/mul.ll
@@ -245,9 +245,10 @@ define i8 @shl1_decrement(i8 %x, i8 %y) {
 define i8 @shl1_decrement_commute(i8 %x, i8 noundef %p) {
 ; CHECK-LABEL: @shl1_decrement_commute(
 ; CHECK-NEXT:    [[Y:%.*]] = ashr i8 [[P:%.*]], 1
-; CHECK-NEXT:    [[MULSHL:%.*]] = shl i8 [[Y]], [[X:%.*]]
-; CHECK-NEXT:    [[M1:%.*]] = sub i8 [[MULSHL]], [[Y]]
-; CHECK-NEXT:    ret i8 [[M1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X:%.*]]
+; CHECK-NEXT:    [[X1:%.*]] = lshr i8 -1, [[TMP1]]
+; CHECK-NEXT:    [[M:%.*]] = mul i8 [[Y]], [[X1]]
+; CHECK-NEXT:    ret i8 [[M]]
 ;
   %y = ashr i8 %p, 1 ; thwart complexity-based canonicalization
   %pow2x = shl i8 1, %x
@@ -286,8 +287,8 @@ define i8 @shl1_nsw_decrement(i8 %x, i8 %y) {
 
 define i32 @shl1_decrement_use(i32 %x, i32 %y) {
 ; CHECK-LABEL: @shl1_decrement_use(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[X:%.*]]
-; CHECK-NEXT:    [[X1:%.*]] = xor i32 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[X:%.*]]
+; CHECK-NEXT:    [[X1:%.*]] = lshr i32 -1, [[TMP1]]
 ; CHECK-NEXT:    call void @use32(i32 [[X1]])
 ; CHECK-NEXT:    [[M:%.*]] = mul i32 [[X1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i32 [[M]]
diff --git a/llvm/test/Transforms/InstCombine/rem-mul-shl.ll b/llvm/test/Transforms/InstCombine/rem-mul-shl.ll
index 9e2df157c2c85..b119c54b69207 100644
--- a/llvm/test/Transforms/InstCombine/rem-mul-shl.ll
+++ b/llvm/test/Transforms/InstCombine/rem-mul-shl.ll
@@ -20,9 +20,9 @@ define i8 @srem_non_matching(i8 %X, i8 %Y) {
 define i8 @urem_1_shl(i8 %X, i8 %Y) {
 ; CHECK-LABEL: @urem_1_shl(
 ; CHECK-NEXT:    [[BO0:%.*]] = shl nuw nsw i8 1, [[X:%.*]]
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[Y:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[NOTMASK]], -1
-; CHECK-NEXT:    [[R:%.*]] = and i8 [[BO0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i8 -1, [[TMP1]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[BO0]], [[TMP2]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %BO0 = shl nsw nuw i8 1, %X
diff --git a/llvm/test/Transforms/InstCombine/rem.ll b/llvm/test/Transforms/InstCombine/rem.ll
index de484fe6df857..8b5ec8de46fdc 100644
--- a/llvm/test/Transforms/InstCombine/rem.ll
+++ b/llvm/test/Transforms/InstCombine/rem.ll
@@ -351,10 +351,10 @@ define i64 @test14(i64 %x, i32 %y) {
 
 define i64 @test15(i32 %x, i32 %y) {
 ; CHECK-LABEL: @test15(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[Y:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[NOTMASK]], -1
-; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
-; CHECK-NEXT:    [[UREM:%.*]] = zext nneg i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 -1, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP2]], [[X:%.*]]
+; CHECK-NEXT:    [[UREM:%.*]] = zext i32 [[TMP3]] to i64
 ; CHECK-NEXT:    ret i64 [[UREM]]
 ;
   %shl = shl i32 1, %y
@@ -724,10 +724,10 @@ define <2 x i1> @test25_vec(<2 x i32> %A) {
 
 define i1 @test26(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test26(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[B:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[NOTMASK]], -1
-; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[E:%.*]] = icmp ne i32 [[TMP2]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 -1, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP2]], [[A:%.*]]
+; CHECK-NEXT:    [[E:%.*]] = icmp ne i32 [[TMP3]], 0
 ; CHECK-NEXT:    ret i1 [[E]]
 ;
   %C = shl i32 1, %B ; not a constant
diff --git a/llvm/test/Transforms/InstCombine/select-divrem.ll b/llvm/test/Transforms/InstCombine/select-divrem.ll
index e0c460c37451d..2e40675bfae14 100644
--- a/llvm/test/Transforms/InstCombine/select-divrem.ll
+++ b/llvm/test/Transforms/InstCombine/select-divrem.ll
@@ -309,9 +309,9 @@ define i128 @rem_euclid_i128(i128 %0) {
 
 define i8 @rem_euclid_non_const_pow2(i8 %0, i8 %1) {
 ; CHECK-LABEL: @rem_euclid_non_const_pow2(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[TMP0:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i8 [[NOTMASK]], -1
-; CHECK-NEXT:    [[SEL:%.*]] = and i8 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP3:%.*]] = sub i8 7, [[TMP0:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = lshr i8 -1, [[TMP3]]
+; CHECK-NEXT:    [[SEL:%.*]] = and i8 [[TMP4]], [[TMP1:%.*]]
 ; CHECK-NEXT:    ret i8 [[SEL]]
 ;
   %pow2 = shl i8 1, %0
diff --git a/llvm/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll b/llvm/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll
index a3c8d3393d04f..40aa8ea412c17 100644
--- a/llvm/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll
+++ b/llvm/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll
@@ -17,8 +17,8 @@
 
 define i32 @shl_add(i32 %NBits) {
 ; CHECK-LABEL: @shl_add(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr i32 -1, [[TMP1]]
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl i32 1, %NBits
@@ -28,8 +28,8 @@ define i32 @shl_add(i32 %NBits) {
 
 define i32 @shl_add_nsw(i32 %NBits) {
 ; CHECK-LABEL: @shl_add_nsw(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr i32 -1, [[TMP1]]
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl i32 1, %NBits
@@ -59,8 +59,8 @@ define i32 @shl_add_nsw_nuw(i32 %NBits) {
 
 define i32 @shl_nsw_add(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_add(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr i32 -1, [[TMP1]]
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nsw i32 1, %NBits
@@ -70,8 +70,8 @@ define i32 @shl_nsw_add(i32 %NBits) {
 
 define i32 @shl_nsw_add_nsw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_add_nsw(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr i32 -1, [[TMP1]]
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nsw i32 1, %NBits
@@ -101,8 +101,8 @@ define i32 @shl_nsw_add_nsw_nuw(i32 %NBits) {
 
 define i32 @shl_nuw_add(i32 %NBits) {
 ; CHECK-LABEL: @shl_nuw_add(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr i32 -1, [[TMP1]]
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw i32 1, %NBits
@@ -112,8 +112,8 @@ define i32 @shl_nuw_add(i32 %NBits) {
 
 define i32 @shl_nuw_add_nsw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nuw_add_nsw(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr i32 -1, [[TMP1]]
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw i32 1, %NBits
@@ -143,8 +143,8 @@ define i32 @shl_nuw_add_nsw_nuw(i32 %NBits) {
 
 define i32 @shl_nsw_nuw_add(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_nuw_add(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr i32 -1, [[TMP1]]
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw nsw i32 1, %NBits
@@ -154,8 +154,8 @@ define i32 @shl_nsw_nuw_add(i32 %NBits) {
 
 define i32 @shl_nsw_nuw_add_nsw(i32 %NBits) {
 ; CHECK-LABEL: @shl_nsw_nuw_add_nsw(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 31, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr i32 -1, [[TMP1]]
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %setbit = shl nuw nsw i32 1, %NBits
@@ -187,8 +187,8 @@ define i32 @shl_nsw_nuw_add_nsw_nuw(i32 %NBits) {
 
 define <2 x i32> @shl_add_vec(<2 x i32> %NBits) {
 ; CHECK-LABEL: @shl_add_vec(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <2 x i32> <i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor <2 x i32> [[NOTMASK]], <i32 -1, i32 -1>
+; CHECK-NEXT:    [[TMP1:%.*]] = sub <2 x i32> <i32 31, i32 31>, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr <2 x i32> <i32 -1, i32 -1>, [[TMP1]]
 ; CHECK-NEXT:    ret <2 x i32> [[RET]]
 ;
   %setbit = shl <2 x i32> <i32 1, i32 1>, %NBits
@@ -198,8 +198,8 @@ define <2 x i32> @shl_add_vec(<2 x i32> %NBits) {
 
 define <3 x i32> @shl_add_vec_poison0(<3 x i32> %NBits) {
 ; CHECK-LABEL: @shl_add_vec_poison0(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], <i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[TMP1:%.*]] = sub <3 x i32> <i32 31, i32 31, i32 31>, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[TMP1]]
 ; CHECK-NEXT:    ret <3 x i32> [[RET]]
 ;
   %setbit = shl <3 x i32> <i32 1, i32 poison, i32 1>, %NBits
@@ -209,8 +209,8 @@ define <3 x i32> @shl_add_vec_poison0(<3 x i32> %NBits) {
 
 define <3 x i32> @shl_add_vec_poison1(<3 x i32> %NBits) {
 ; CHECK-LABEL: @shl_add_vec_poison1(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], <i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[TMP1:%.*]] = sub <3 x i32> <i32 31, i32 31, i32 31>, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[TMP1]]
 ; CHECK-NEXT:    ret <3 x i32> [[RET]]
 ;
   %setbit = shl <3 x i32> <i32 1, i32 1, i32 1>, %NBits
@@ -220,8 +220,8 @@ define <3 x i32> @shl_add_vec_poison1(<3 x i32> %NBits) {
 
 define <3 x i32> @shl_add_vec_poison2(<3 x i32> %NBits) {
 ; CHECK-LABEL: @shl_add_vec_poison2(
-; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], <i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[TMP1:%.*]] = sub <3 x i32> <i32 31, i32 31, i32 31>, [[NBITS:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[TMP1]]
 ; CHECK-NEXT:    ret <3 x i32> [[RET]]
 ;
   %setbit = shl <3 x i32> <i32 1, i32 poison, i32 1>, %NBits

``````````

</details>


https://github.com/llvm/llvm-project/pull/97978


More information about the llvm-commits mailing list