[llvm] f7d1baa - [KnownBits] Return zero instead of unknown for always poison shifts

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Tue May 23 05:41:31 PDT 2023


Author: Nikita Popov
Date: 2023-05-23T14:41:22+02:00
New Revision: f7d1baa414e6e4fca04092cb2f71e9b037c762f0

URL: https://github.com/llvm/llvm-project/commit/f7d1baa414e6e4fca04092cb2f71e9b037c762f0
DIFF: https://github.com/llvm/llvm-project/commit/f7d1baa414e6e4fca04092cb2f71e9b037c762f0.diff

LOG: [KnownBits] Return zero instead of unknown for always poison shifts

For always-poison shifts, any KnownBits return value is valid.
Currently we return unknown, but returning zero is generally more
profitable. We had some code in ValueTracking that tried to do this,
but it was actually dead code.

Differential Revision: https://reviews.llvm.org/D150648
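
To illustrate the new behavior, here is a minimal sketch (not part of this
commit, assuming a standalone tool linked against LLVMSupport) of what
KnownBits::shl now reports when the shift amount is provably >= the bit width:

    #include "llvm/ADT/APInt.h"
    #include "llvm/Support/KnownBits.h"
    #include <cassert>

    int main() {
      llvm::KnownBits LHS(8); // nothing known about the shifted value
      // Shift amount is the constant 9; 9 >= 8 bits, so the shift is
      // always poison.
      llvm::KnownBits RHS = llvm::KnownBits::makeConstant(llvm::APInt(8, 9));
      llvm::KnownBits Res = llvm::KnownBits::shl(LHS, RHS);
      // Previously Res was unknown; with this patch every bit is known zero.
      assert(Res.Zero.isAllOnes());
      assert(Res.One.isZero());
      return 0;
    }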

Added: 
    

Modified: 
    llvm/lib/Analysis/ValueTracking.cpp
    llvm/lib/Support/KnownBits.cpp
    llvm/test/Analysis/ScalarEvolution/ashr.ll
    llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
    llvm/test/CodeGen/LoongArch/rotl-rotr.ll
    llvm/test/CodeGen/RISCV/rotl-rotr.ll
    llvm/test/Transforms/InstCombine/not-add.ll
    llvm/test/Transforms/InstCombine/oss_fuzz_32759.ll
    llvm/test/Transforms/InstCombine/shift.ll
    llvm/unittests/Analysis/ValueTrackingTest.cpp
    llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
    llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 512661ba0a0cd..a70b4cf907a97 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -994,13 +994,6 @@ static void computeKnownBitsFromShiftOperator(
 
   if (ShiftAmtIsConstant) {
     Known = KF(Known2, Known);
-
-    // If the known bits conflict, this must be an overflowing left shift, so
-    // the shift result is poison. We can return anything we want. Choose 0 for
-    // the best folding opportunity.
-    if (Known.hasConflict())
-      Known.setAllZero();
-
     return;
   }
 

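For context on the deletion above (my reading, not stated in the patch
beyond "dead code"): the removed branch guarded against KF returning
conflicting known bits, but the shift helpers in KnownBits.cpp deliberately
never return a conflict, so the check could not fire. A sketch of what
hasConflict() detects, for illustration only:

    #include "llvm/Support/KnownBits.h"
    #include <cassert>

    int main() {
      // A conflict means some bit is claimed both known-zero and known-one.
      llvm::KnownBits K(8);
      K.Zero.setBit(0);
      K.One.setBit(0);
      assert(K.hasConflict());
      return 0;
    }
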
diff --git a/llvm/lib/Support/KnownBits.cpp b/llvm/lib/Support/KnownBits.cpp
index a806ac2ad14eb..faa7424c87fe7 100644
--- a/llvm/lib/Support/KnownBits.cpp
+++ b/llvm/lib/Support/KnownBits.cpp
@@ -183,9 +183,11 @@ KnownBits KnownBits::shl(const KnownBits &LHS, const KnownBits &RHS) {
   unsigned MinTrailingZeros = LHS.countMinTrailingZeros();
 
   APInt MinShiftAmount = RHS.getMinValue();
-  if (MinShiftAmount.uge(BitWidth))
-    // Always poison. Return unknown because we don't like returning conflict.
+  if (MinShiftAmount.uge(BitWidth)) {
+    // Always poison. Return zero because we don't like returning conflict.
+    Known.setAllZero();
     return Known;
+  }
 
   // Minimum shift amount low bits are known zero.
   MinTrailingZeros += MinShiftAmount.getZExtValue();
@@ -240,9 +242,11 @@ KnownBits KnownBits::lshr(const KnownBits &LHS, const KnownBits &RHS) {
 
   // Minimum shift amount high bits are known zero.
   APInt MinShiftAmount = RHS.getMinValue();
-  if (MinShiftAmount.uge(BitWidth))
-    // Always poison. Return unknown because we don't like returning conflict.
+  if (MinShiftAmount.uge(BitWidth)) {
+    // Always poison. Return zero because we don't like returning conflict.
+    Known.setAllZero();
     return Known;
+  }
 
   MinLeadingZeros += MinShiftAmount.getZExtValue();
   MinLeadingZeros = std::min(MinLeadingZeros, BitWidth);
@@ -295,9 +299,11 @@ KnownBits KnownBits::ashr(const KnownBits &LHS, const KnownBits &RHS) {
 
   // Minimum shift amount high bits are known sign bits.
   APInt MinShiftAmount = RHS.getMinValue();
-  if (MinShiftAmount.uge(BitWidth))
-    // Always poison. Return unknown because we don't like returning conflict.
+  if (MinShiftAmount.uge(BitWidth)) {
+    // Always poison. Return zero because we don't like returning conflict.
+    Known.setAllZero();
     return Known;
+  }
 
   if (MinLeadingZeros) {
     MinLeadingZeros += MinShiftAmount.getZExtValue();

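All three shift helpers now agree. A quick sketch of the expected results
after this patch (same caveats as the sketch above):

    #include "llvm/ADT/APInt.h"
    #include "llvm/Support/KnownBits.h"
    #include <cassert>

    int main() {
      llvm::KnownBits LHS(8); // unknown value
      // Minimum shift amount 8 >= bit width 8: always poison.
      llvm::KnownBits RHS = llvm::KnownBits::makeConstant(llvm::APInt(8, 8));
      assert(llvm::KnownBits::lshr(LHS, RHS).Zero.isAllOnes());
      assert(llvm::KnownBits::ashr(LHS, RHS).Zero.isAllOnes());
      return 0;
    }
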
diff --git a/llvm/test/Analysis/ScalarEvolution/ashr.ll b/llvm/test/Analysis/ScalarEvolution/ashr.ll
index 7861ae32cba2b..07e23cc1dcabe 100644
--- a/llvm/test/Analysis/ScalarEvolution/ashr.ll
+++ b/llvm/test/Analysis/ScalarEvolution/ashr.ll
@@ -51,7 +51,7 @@ define i32 @t4(i32 %x, i32 %y) {
 ; CHECK-LABEL: 't4'
 ; CHECK-NEXT:  Classifying expressions for: @t4
 ; CHECK-NEXT:    %i0 = ashr exact i32 %x, 32
-; CHECK-NEXT:    --> %i0 U: full-set S: full-set
+; CHECK-NEXT:    --> %i0 U: [0,1) S: [0,1)
 ; CHECK-NEXT:  Determining loop execution counts for: @t4
 ;
   %i0 = ashr exact i32 %x, 32

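The new SCEV output reads as follows: the half-open range [0,1) contains
exactly the value 0, so %i0 is now known to be zero in both the unsigned and
signed range. A sketch in terms of ConstantRange, for illustration only:

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/ConstantRange.h"
    #include <cassert>

    int main() {
      // [0,1) is the singleton range {0}.
      llvm::ConstantRange CR(llvm::APInt(32, 0), llvm::APInt(32, 1));
      assert(CR.isSingleElement());
      assert(CR.getSingleElement()->isZero());
      return 0;
    }
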
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
index aea2dd91184c7..8f9ca8ec538d1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-sextinreg.mir
@@ -95,8 +95,7 @@ body:             |
     ; CHECK-NEXT: %x:_(s32) = COPY $w0
     ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 32
     ; CHECK-NEXT: %shift:_(s32) = G_ASHR %x, %lsb(s32)
-    ; CHECK-NEXT: %sext_inreg:_(s32) = G_SEXT_INREG %shift, 1
-    ; CHECK-NEXT: $w0 = COPY %sext_inreg(s32)
+    ; CHECK-NEXT: $w0 = COPY %shift(s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %lsb:_(s32) = G_CONSTANT i32 32
@@ -122,8 +121,7 @@ body:             |
     ; CHECK-NEXT: %x:_(s32) = COPY $w0
     ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 -1
     ; CHECK-NEXT: %shift:_(s32) = G_ASHR %x, %lsb(s32)
-    ; CHECK-NEXT: %sext_inreg:_(s32) = G_SEXT_INREG %shift, 1
-    ; CHECK-NEXT: $w0 = COPY %sext_inreg(s32)
+    ; CHECK-NEXT: $w0 = COPY %shift(s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %lsb:_(s32) = G_CONSTANT i32 -1

diff --git a/llvm/test/CodeGen/LoongArch/rotl-rotr.ll b/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
index f54a47ae847af..2162d4872db52 100644
--- a/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
+++ b/llvm/test/CodeGen/LoongArch/rotl-rotr.ll
@@ -374,10 +374,9 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; LA32-LABEL: rotl_64_mask_or_128_or_64:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    sll.w $a3, $a0, $a2
 ; LA32-NEXT:    sub.w $a0, $zero, $a2
 ; LA32-NEXT:    srl.w $a0, $a1, $a0
-; LA32-NEXT:    move $a1, $a3
+; LA32-NEXT:    move $a1, $zero
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotl_64_mask_or_128_or_64:
@@ -499,10 +498,8 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; LA32-LABEL: rotr_64_mask_or_128_or_64:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    srl.w $a3, $a1, $a2
-; LA32-NEXT:    sub.w $a1, $zero, $a2
-; LA32-NEXT:    sll.w $a1, $a0, $a1
-; LA32-NEXT:    move $a0, $a3
+; LA32-NEXT:    srl.w $a0, $a1, $a2
+; LA32-NEXT:    move $a1, $zero
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: rotr_64_mask_or_128_or_64:

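The LoongArch changes above and the RISC-V changes below share one cause:
the rotate is expanded to (x << amt) | (x >> amt'), and in these tests one
of the amounts has a high bit forced set by the OR mask, making it provably
>= the bit width. That shift's result is now known zero, so the OR collapses
to the other shift. A sketch of the shift-amount reasoning for the 32-bit
case (illustration only, not code from the patch):

    #include "llvm/ADT/APInt.h"
    #include "llvm/Support/KnownBits.h"
    #include <cassert>

    int main() {
      // Shift amount y | 64: bit 6 is known one, so the minimum unsigned
      // value is 64, which is >= the 32-bit width.
      llvm::KnownBits Amt(32);
      Amt.One.setBit(6);
      assert(Amt.getMinValue().uge(32));
      // Shifting an unknown value by this amount now yields all-zero
      // known bits, so (x << amt) | (x >> amt') folds to the bare shift.
      llvm::KnownBits Res = llvm::KnownBits::shl(llvm::KnownBits(32), Amt);
      assert(Res.Zero.isAllOnes());
      return 0;
    }
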
diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
index 0ee1f3a8648fa..74bc333010dd5 100644
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -516,20 +516,15 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 define i32 @rotl_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; RV32I-LABEL: rotl_32_mask_or_64_or_32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    ori a2, a1, 64
-; RV32I-NEXT:    sll a2, a0, a2
 ; RV32I-NEXT:    neg a1, a1
 ; RV32I-NEXT:    ori a1, a1, 32
 ; RV32I-NEXT:    srl a0, a0, a1
-; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotl_32_mask_or_64_or_32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sllw a2, a0, a1
 ; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    srlw a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotl_32_mask_or_64_or_32:
@@ -670,20 +665,13 @@ define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
 define i32 @rotr_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
 ; RV32I-LABEL: rotr_32_mask_or_64_or_32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    ori a2, a1, 64
-; RV32I-NEXT:    srl a2, a0, a2
-; RV32I-NEXT:    neg a1, a1
-; RV32I-NEXT:    ori a1, a1, 32
-; RV32I-NEXT:    sll a0, a0, a1
-; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    ori a1, a1, 64
+; RV32I-NEXT:    srl a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotr_32_mask_or_64_or_32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srlw a2, a0, a1
-; RV64I-NEXT:    negw a1, a1
-; RV64I-NEXT:    sllw a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotr_32_mask_or_64_or_32:
@@ -1013,28 +1001,23 @@ define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; RV32I-LABEL: rotl_64_mask_or_128_or_64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sll a3, a0, a2
 ; RV32I-NEXT:    neg a0, a2
 ; RV32I-NEXT:    srl a0, a1, a0
-; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotl_64_mask_or_128_or_64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    ori a2, a1, 128
-; RV64I-NEXT:    sll a2, a0, a2
 ; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    ori a1, a1, 64
 ; RV64I-NEXT:    srl a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotl_64_mask_or_128_or_64:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    sll a3, a0, a2
 ; RV32ZBB-NEXT:    neg a0, a2
 ; RV32ZBB-NEXT:    srl a0, a1, a0
-; RV32ZBB-NEXT:    mv a1, a3
+; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: rotl_64_mask_or_128_or_64:
@@ -1044,10 +1027,9 @@ define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ;
 ; RV32XTHEADBB-LABEL: rotl_64_mask_or_128_or_64:
 ; RV32XTHEADBB:       # %bb.0:
-; RV32XTHEADBB-NEXT:    sll a3, a0, a2
 ; RV32XTHEADBB-NEXT:    neg a0, a2
 ; RV32XTHEADBB-NEXT:    srl a0, a1, a0
-; RV32XTHEADBB-NEXT:    mv a1, a3
+; RV32XTHEADBB-NEXT:    li a1, 0
 ; RV32XTHEADBB-NEXT:    ret
 ;
 ; RV64XTHEADBB-LABEL: rotl_64_mask_or_128_or_64:
@@ -1359,28 +1341,20 @@ define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
 define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; RV32I-LABEL: rotr_64_mask_or_128_or_64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    srl a3, a1, a2
-; RV32I-NEXT:    neg a1, a2
-; RV32I-NEXT:    sll a1, a0, a1
-; RV32I-NEXT:    mv a0, a3
+; RV32I-NEXT:    srl a0, a1, a2
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: rotr_64_mask_or_128_or_64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    ori a2, a1, 128
-; RV64I-NEXT:    srl a2, a0, a2
-; RV64I-NEXT:    negw a1, a1
-; RV64I-NEXT:    ori a1, a1, 64
-; RV64I-NEXT:    sll a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    ori a1, a1, 128
+; RV64I-NEXT:    srl a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: rotr_64_mask_or_128_or_64:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srl a3, a1, a2
-; RV32ZBB-NEXT:    neg a1, a2
-; RV32ZBB-NEXT:    sll a1, a0, a1
-; RV32ZBB-NEXT:    mv a0, a3
+; RV32ZBB-NEXT:    srl a0, a1, a2
+; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: rotr_64_mask_or_128_or_64:
@@ -1390,10 +1364,8 @@ define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ;
 ; RV32XTHEADBB-LABEL: rotr_64_mask_or_128_or_64:
 ; RV32XTHEADBB:       # %bb.0:
-; RV32XTHEADBB-NEXT:    srl a3, a1, a2
-; RV32XTHEADBB-NEXT:    neg a1, a2
-; RV32XTHEADBB-NEXT:    sll a1, a0, a1
-; RV32XTHEADBB-NEXT:    mv a0, a3
+; RV32XTHEADBB-NEXT:    srl a0, a1, a2
+; RV32XTHEADBB-NEXT:    li a1, 0
 ; RV32XTHEADBB-NEXT:    ret
 ;
 ; RV64XTHEADBB-LABEL: rotr_64_mask_or_128_or_64:

diff --git a/llvm/test/Transforms/InstCombine/not-add.ll b/llvm/test/Transforms/InstCombine/not-add.ll
index 03f4f445fa898..4fab55d6e7a12 100644
--- a/llvm/test/Transforms/InstCombine/not-add.ll
+++ b/llvm/test/Transforms/InstCombine/not-add.ll
@@ -170,11 +170,6 @@ cond.end:
 define void @pr50370(i32 %x) {
 ; CHECK-LABEL: @pr50370(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[X:%.*]], 1
-; CHECK-NEXT:    [[B15:%.*]] = srem i32 ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 65537)), [[XOR]]
-; CHECK-NEXT:    [[B12:%.*]] = add nsw i32 [[B15]], ashr (i32 65536, i32 or (i32 zext (i1 icmp eq (ptr @g, ptr null) to i32), i32 65537))
-; CHECK-NEXT:    [[B:%.*]] = xor i32 [[B12]], -1
-; CHECK-NEXT:    store i32 [[B]], ptr undef, align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:

diff --git a/llvm/test/Transforms/InstCombine/oss_fuzz_32759.ll b/llvm/test/Transforms/InstCombine/oss_fuzz_32759.ll
index 9f6db50d0c4bb..96a16751740c8 100644
--- a/llvm/test/Transforms/InstCombine/oss_fuzz_32759.ll
+++ b/llvm/test/Transforms/InstCombine/oss_fuzz_32759.ll
@@ -9,7 +9,7 @@ define i1 @oss_fuzz_32759(i1 %y, i1 %c1) {
 ; CHECK:       cond.true:
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    ret i1 false
+; CHECK-NEXT:    ret i1 [[C1]]
 ;
 entry:
   br i1 %c1, label %cond.true, label %end

diff --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll
index b45ad7fa29ffb..c8e00cd062b5e 100644
--- a/llvm/test/Transforms/InstCombine/shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift.ll
@@ -1743,12 +1743,6 @@ define void @ashr_out_of_range(ptr %A) {
 ; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=26135
 define void @ashr_out_of_range_1(ptr %A) {
 ; CHECK-LABEL: @ashr_out_of_range_1(
-; CHECK-NEXT:    [[L:%.*]] = load i177, ptr [[A:%.*]], align 4
-; CHECK-NEXT:    [[G11:%.*]] = getelementptr i177, ptr [[A]], i64 -1
-; CHECK-NEXT:    [[B24_LOBIT:%.*]] = ashr i177 [[L]], 175
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i177 [[B24_LOBIT]] to i64
-; CHECK-NEXT:    [[G62:%.*]] = getelementptr i177, ptr [[G11]], i64 [[TMP1]]
-; CHECK-NEXT:    store i177 0, ptr [[G62]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %L = load i177, ptr %A, align 4

diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 3684f6fb7d539..e464407197d63 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -694,7 +694,7 @@ TEST_F(ValueTrackingTest, ComputeNumSignBits_PR32045) {
       "  %A = ashr i32 %a, -1\n"
       "  ret i32 %A\n"
       "}\n");
-  EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u);
+  EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 32u);
 }
 
 // No guarantees for canonical IR in this analysis, so this just bails out.

diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
index ae3ef04d844c0..f3659bc695636 100644
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
@@ -1630,12 +1630,12 @@ TEST_F(AArch64GISelMITest, TestInvalidQueries) {
   KnownBits BiggerSizeRes = Info.getKnownBits(BiggerSizedShl);
 
 
-  // We don't know what the result of the shift is, but we should not crash
+  // Result can be anything, but we should not crash.
   EXPECT_TRUE(EqSizeRes.One.isZero());
-  EXPECT_TRUE(EqSizeRes.Zero.isZero());
+  EXPECT_TRUE(EqSizeRes.Zero.isAllOnes());
 
   EXPECT_TRUE(BiggerSizeRes.One.isZero());
-  EXPECT_TRUE(BiggerSizeRes.Zero.isZero());
+  EXPECT_TRUE(BiggerSizeRes.Zero.isAllOnes());
 }
 
 TEST_F(AArch64GISelMITest, TestKnownBitsAssertZext) {

diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
index fbbdf03d82f84..ab180cd1cc272 100644
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
@@ -1456,10 +1456,10 @@ TEST_F(AArch64GISelMITest, TestVectorInvalidQueries) {
   KnownBits BiggerSizeRes = Info.getKnownBits(BiggerSizedShl);
 
   EXPECT_TRUE(EqSizeRes.One.isZero());
-  EXPECT_TRUE(EqSizeRes.Zero.isZero());
+  EXPECT_TRUE(EqSizeRes.Zero.isAllOnes());
 
   EXPECT_TRUE(BiggerSizeRes.One.isZero());
-  EXPECT_TRUE(BiggerSizeRes.Zero.isZero());
+  EXPECT_TRUE(BiggerSizeRes.Zero.isAllOnes());
 }
 
 TEST_F(AArch64GISelMITest, TestKnownBitsVectorAssertZext) {

