[llvm] 9a8f517 - [ValueTracking] Add KnownBits patterns `xor(x, x - 1)` and `and(x, -x)` for knowing upper bits to be zero
Noah Goldstein via llvm-commits
llvm-commits at lists.llvm.org
Sat Feb 18 11:31:34 PST 2023
Author: Noah Goldstein
Date: 2023-02-18T13:31:17-06:00
New Revision: 9a8f517f5750050e9df4bca332e90d38d075f6a7
URL: https://github.com/llvm/llvm-project/commit/9a8f517f5750050e9df4bca332e90d38d075f6a7
DIFF: https://github.com/llvm/llvm-project/commit/9a8f517f5750050e9df4bca332e90d38d075f6a7.diff
LOG: [ValueTracking] Add KnownBits patterns `xor(x, x - 1)` and `and(x, -x)` for knowing upper bits to be zero
These two BMI patterns will clear the upper bits of the result past the
first set bit. So if we know a single bit in `x` is set, we know that
`result[bitwidth - 1 : log2(x) + 1] = 0`.
Alive2:
blsmsk: https://alive2.llvm.org/ce/z/a397BS
blsi: https://alive2.llvm.org/ce/z/tsbQhC
Differential Revision: https://reviews.llvm.org/D142271
Added:
Modified:
llvm/lib/Analysis/ValueTracking.cpp
llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll
llvm/test/Transforms/InstCombine/ctpop-pow2.ll
llvm/test/Transforms/InstSimplify/ctpop-pow2.ll
Removed:
################################################################################
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 05b802cfd8480..536d880a86001 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1077,6 +1077,22 @@ static void computeKnownBitsFromOperator(const Operator *I,
computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
+ Value *X = nullptr, *Y = nullptr;
+ // and(x, -x) is a common idiom for clearing all but lowest set bit. If we
+ // have a single known bit in x, we can clear all bits above it.
+ // TODO: instcombine often reassociates independent `and` which can hide
+ // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x).
+ if (!Known.One.isZero() || !Known2.One.isZero()) {
+ if (match(I, m_c_BinOp(m_Value(X), m_Neg(m_Deferred(X))))) {
+ // -(-x) == x so pick whichever we can get a better result with.
+ if (Known.countMaxTrailingZeros() <= Known2.countMaxTrailingZeros())
+ Known = Known.blsi();
+ else
+ Known = Known2.blsi();
+
+ break;
+ }
+ }
Known &= Known2;
// and(x, add (x, -1)) is a common idiom that always clears the low bit;
@@ -1084,7 +1100,6 @@ static void computeKnownBitsFromOperator(const Operator *I,
// matching the form add(x, add(x, y)) where y is odd.
// TODO: This could be generalized to clearing any bit set in y where the
// following bit is known to be unset in y.
- Value *X = nullptr, *Y = nullptr;
if (!Known.Zero[0] && !Known.One[0] &&
match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
Known2.resetAll();
@@ -1100,12 +1115,26 @@ static void computeKnownBitsFromOperator(const Operator *I,
Known |= Known2;
break;
- case Instruction::Xor:
+ case Instruction::Xor: {
computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
- Known ^= Known2;
- break;
+ Value *X = nullptr;
+ // xor(x, x + -1) is a common idiom that will clear all bits above
+ // the lowest set bit. We can safely say any bit past the lowest
+ // known one must be zero.
+ // TODO: `x + -1` is often shrunk to `x + C` where `C` is the minimum bits
+ // needed for demanded. This can cause us to miss this pattern. Expand to
+ // account for `x + -1` in the context of demanded bits.
+ if ((!Known.One.isZero() || !Known2.One.isZero()) &&
+ match(I, m_c_BinOp(m_Value(X), m_c_Add(m_Deferred(X), m_AllOnes())))) {
+ // Known2 is confusingly LHS.
+ const KnownBits &XBits = I->getOperand(0) == X ? Known2 : Known;
+ Known = XBits.blsmsk();
+ } else {
+ Known ^= Known2;
+ }
+ } break;
case Instruction::Mul: {
bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
diff --git a/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll b/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll
index aa192c38ed747..e3a36fb2387a4 100644
--- a/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll
+++ b/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll
@@ -27,11 +27,7 @@ define <2 x i1> @blsmsk_ne_is_true_vec(<2 x i32> %x) {
define <2 x i1> @blsmsk_ne_is_true_diff_vec(<2 x i32> %x) {
; CHECK-LABEL: @blsmsk_ne_is_true_diff_vec(
-; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 10, i32 130>
-; CHECK-NEXT: [[X2:%.*]] = add nsw <2 x i32> [[X1]], <i32 -1, i32 -1>
-; CHECK-NEXT: [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]]
-; CHECK-NEXT: [[Z:%.*]] = icmp ne <2 x i32> [[X3]], <i32 8, i32 8>
-; CHECK-NEXT: ret <2 x i1> [[Z]]
+; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
;
%x1 = or <2 x i32> %x, <i32 10, i32 130>
%x2 = sub <2 x i32> %x1, <i32 1, i32 1>
@@ -72,11 +68,7 @@ define <2 x i1> @blsmsk_gt_is_false_vec(<2 x i32> %x) {
define i1 @blsmsk_signed_is_false(i32 %x) {
; CHECK-LABEL: @blsmsk_signed_is_false(
-; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 10
-; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X1]], -1
-; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]]
-; CHECK-NEXT: [[Z:%.*]] = icmp slt i32 [[X3]], 0
-; CHECK-NEXT: ret i1 [[Z]]
+; CHECK-NEXT: ret i1 false
;
%x1 = or i32 %x, 10
%x2 = sub i32 %x1, 1
@@ -87,11 +79,7 @@ define i1 @blsmsk_signed_is_false(i32 %x) {
define i32 @blsmsk_add_eval(i32 %x) {
; CHECK-LABEL: @blsmsk_add_eval(
-; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 9
-; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X1]], -1
-; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X1]]
-; CHECK-NEXT: [[Z:%.*]] = add i32 [[X3]], 32
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 33
;
%x1 = or i32 %x, 9
%x2 = sub i32 %x1, 1
@@ -105,7 +93,7 @@ define <2 x i32> @blsmsk_add_eval_vec(<2 x i32> %x) {
; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 9, i32 9>
; CHECK-NEXT: [[X2:%.*]] = add nsw <2 x i32> [[X1]], <i32 -1, i32 -1>
; CHECK-NEXT: [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]]
-; CHECK-NEXT: [[Z:%.*]] = add <2 x i32> [[X3]], <i32 32, i32 32>
+; CHECK-NEXT: [[Z:%.*]] = or <2 x i32> [[X3]], <i32 32, i32 32>
; CHECK-NEXT: ret <2 x i32> [[Z]]
;
%x1 = or <2 x i32> %x, <i32 9, i32 9>
@@ -118,9 +106,9 @@ define <2 x i32> @blsmsk_add_eval_vec(<2 x i32> %x) {
define i32 @blsmsk_sub_eval(i32 %x) {
; CHECK-LABEL: @blsmsk_sub_eval(
; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 9
-; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X1]], -1
+; CHECK-NEXT: [[X2:%.*]] = add i32 [[X1]], 31
; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X1]], [[X2]]
-; CHECK-NEXT: [[Z:%.*]] = add i32 [[X3]], -32
+; CHECK-NEXT: [[Z:%.*]] = or i32 [[X3]], -32
; CHECK-NEXT: ret i32 [[Z]]
;
%x1 = or i32 %x, 9
@@ -132,11 +120,7 @@ define i32 @blsmsk_sub_eval(i32 %x) {
define i32 @blsmsk_or_eval(i32 %x) {
; CHECK-LABEL: @blsmsk_or_eval(
-; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 129
-; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X1]], -1
-; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X1]]
-; CHECK-NEXT: [[Z:%.*]] = or i32 [[X3]], 32
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 33
;
%x1 = or i32 %x, 129
%x2 = sub i32 %x1, 1
@@ -162,11 +146,7 @@ define <2 x i32> @blsmsk_or_eval_vec(<2 x i32> %x) {
define i32 @blsmsk_xor_eval(i32 %x) {
; CHECK-LABEL: @blsmsk_xor_eval(
-; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 255
-; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X1]], -1
-; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X1]], [[X2]]
-; CHECK-NEXT: [[Z1:%.*]] = or i32 [[X3]], 32
-; CHECK-NEXT: ret i32 [[Z1]]
+; CHECK-NEXT: ret i32 33
;
%x1 = or i32 %x, 255
%x2 = sub i32 %x1, 1
@@ -232,10 +212,7 @@ define i1 @blsmsk_eq_is_false_assume(i32 %x) {
; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X]], -1
-; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]]
-; CHECK-NEXT: [[Z:%.*]] = icmp eq i32 [[X3]], 8
-; CHECK-NEXT: ret i1 [[Z]]
+; CHECK-NEXT: ret i1 false
;
%lb = and i32 %x, 4
%cmp = icmp ne i32 %lb, 0
@@ -270,10 +247,7 @@ define i32 @blsmsk_add_eval_assume(i32 %x) {
; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X]], -1
-; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]]
-; CHECK-NEXT: [[Z:%.*]] = add i32 [[X3]], 32
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 33
;
%lb = and i32 %x, 1
%cmp = icmp ne i32 %lb, 0
@@ -313,9 +287,9 @@ define i32 @blsmsk_sub_eval_assume(i32 %x) {
; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X]], -1
+; CHECK-NEXT: [[X2:%.*]] = add i32 [[X]], 31
; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]]
-; CHECK-NEXT: [[Z:%.*]] = add i32 [[X3]], -32
+; CHECK-NEXT: [[Z:%.*]] = or i32 [[X3]], -32
; CHECK-NEXT: ret i32 [[Z]]
;
%lb = and i32 %x, 1
@@ -332,10 +306,7 @@ define i32 @blsmsk_or_eval_assume(i32 %x) {
; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X]], -1
-; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]]
-; CHECK-NEXT: [[Z:%.*]] = or i32 [[X3]], 32
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 33
;
%lb = and i32 %x, 1
%cmp = icmp ne i32 %lb, 0
@@ -436,11 +407,7 @@ define i1 @blsi_gt_is_false(i32 %x) {
define i32 @blsi_add_eval(i32 %x) {
; CHECK-LABEL: @blsi_add_eval(
-; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 9
-; CHECK-NEXT: [[X2:%.*]] = sub nsw i32 0, [[X1]]
-; CHECK-NEXT: [[X3:%.*]] = and i32 [[X1]], [[X2]]
-; CHECK-NEXT: [[Z:%.*]] = add i32 [[X3]], 32
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 33
;
%x1 = or i32 %x, 9
%x2 = sub i32 0, %x1
@@ -451,11 +418,7 @@ define i32 @blsi_add_eval(i32 %x) {
define i32 @blsi_sub_eval(i32 %x) {
; CHECK-LABEL: @blsi_sub_eval(
-; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 33
-; CHECK-NEXT: [[X2:%.*]] = sub nsw i32 0, [[X1]]
-; CHECK-NEXT: [[X3:%.*]] = and i32 [[X1]], [[X2]]
-; CHECK-NEXT: [[Z:%.*]] = add i32 [[X3]], -32
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 -31
;
%x1 = or i32 %x, 33
%x2 = sub i32 0, %x1
@@ -469,7 +432,7 @@ define <2 x i32> @blsi_sub_eval_vec(<2 x i32> %x) {
; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 33, i32 33>
; CHECK-NEXT: [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]]
; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X1]], [[X2]]
-; CHECK-NEXT: [[Z:%.*]] = add <2 x i32> [[X3]], <i32 -32, i32 -32>
+; CHECK-NEXT: [[Z:%.*]] = or <2 x i32> [[X3]], <i32 -32, i32 -32>
; CHECK-NEXT: ret <2 x i32> [[Z]]
;
%x1 = or <2 x i32> %x, <i32 33, i32 33>
@@ -481,11 +444,7 @@ define <2 x i32> @blsi_sub_eval_vec(<2 x i32> %x) {
define i32 @blsi_or_eval(i32 %x) {
; CHECK-LABEL: @blsi_or_eval(
-; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 129
-; CHECK-NEXT: [[X2:%.*]] = sub nsw i32 0, [[X1]]
-; CHECK-NEXT: [[X3:%.*]] = and i32 [[X1]], [[X2]]
-; CHECK-NEXT: [[Z:%.*]] = or i32 [[X3]], 32
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 33
;
%x1 = or i32 %x, 129
%x2 = sub i32 0, %x1
@@ -580,10 +539,7 @@ define i1 @blsi_ne_is_true_assume(i32 %x) {
; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[X2:%.*]] = sub nsw i32 0, [[X]]
-; CHECK-NEXT: [[X3:%.*]] = and i32 [[X2]], [[X]]
-; CHECK-NEXT: [[Z:%.*]] = icmp ne i32 [[X3]], 8
-; CHECK-NEXT: ret i1 [[Z]]
+; CHECK-NEXT: ret i1 true
;
%lb = and i32 %x, 4
%cmp = icmp ne i32 %lb, 0
@@ -633,10 +589,7 @@ define i32 @blsi_xor_eval_assume(i32 %x) {
; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[X2:%.*]] = sub nsw i32 0, [[X]]
-; CHECK-NEXT: [[X3:%.*]] = and i32 [[X2]], [[X]]
-; CHECK-NEXT: [[Z:%.*]] = xor i32 [[X3]], 32
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 33
;
%lb = and i32 %x, 1
%cmp = icmp ne i32 %lb, 0
@@ -652,10 +605,7 @@ define i32 @blsi_and_eval_assume(i32 %x) {
; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 8
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[X2:%.*]] = sub nsw i32 0, [[X]]
-; CHECK-NEXT: [[X3:%.*]] = and i32 [[X2]], [[X]]
-; CHECK-NEXT: [[Z:%.*]] = and i32 [[X3]], 32
-; CHECK-NEXT: ret i32 [[Z]]
+; CHECK-NEXT: ret i32 0
;
%lb = and i32 %x, 8
%cmp = icmp ne i32 %lb, 0
@@ -728,7 +678,7 @@ define i32 @blsmsk_add_no_eval2(i32 %x) {
; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 256
; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X1]], -1
; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X1]], [[X2]]
-; CHECK-NEXT: [[Z:%.*]] = add i32 [[X3]], 32
+; CHECK-NEXT: [[Z:%.*]] = add nuw nsw i32 [[X3]], 32
; CHECK-NEXT: ret i32 [[Z]]
;
%x1 = or i32 %x, 256
@@ -841,14 +791,11 @@ define <2 x i32> @blsi_or_no_partial_eval_vec(<2 x i32> %x) {
;; Test that if we have different knowledge about lowbit of X/-X that we select the minimum.
define i1 @blsi_differing_lowbits(i8 %x) {
; CHECK-LABEL: @blsi_differing_lowbits(
-; CHECK-NEXT: [[Y:%.*]] = or i8 [[X:%.*]], 8
-; CHECK-NEXT: [[Z:%.*]] = sub nsw i8 0, [[Y]]
+; CHECK-NEXT: [[Z:%.*]] = sub i8 0, [[X:%.*]]
; CHECK-NEXT: [[LB:%.*]] = and i8 [[Z]], 2
; CHECK-NEXT: [[NE:%.*]] = icmp ne i8 [[LB]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[NE]])
-; CHECK-NEXT: [[O:%.*]] = and i8 [[Y]], [[Z]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[O]], 4
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: ret i1 false
;
%y = or i8 %x, 8
%z = sub i8 0, %y
@@ -869,9 +816,7 @@ define i1 @blsi_differing_lowbits2(i8 %x) {
; CHECK-NEXT: [[LB2:%.*]] = and i8 [[X]], 2
; CHECK-NEXT: [[NE2:%.*]] = icmp ne i8 [[LB2]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[NE2]])
-; CHECK-NEXT: [[O:%.*]] = and i8 [[Z]], [[X]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[O]], 4
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: ret i1 false
;
%z = sub i8 0, %x
%lb = and i8 %z, 8
diff --git a/llvm/test/Transforms/InstCombine/ctpop-pow2.ll b/llvm/test/Transforms/InstCombine/ctpop-pow2.ll
index 6ae5d32254120..f509fd97b295c 100644
--- a/llvm/test/Transforms/InstCombine/ctpop-pow2.ll
+++ b/llvm/test/Transforms/InstCombine/ctpop-pow2.ll
@@ -144,7 +144,10 @@ define <2 x i64> @ctpop_x_and_negx_vec(<2 x i64> %x) {
define <2 x i32> @ctpop_x_and_negx_vec_nz(<2 x i32> %x) {
; CHECK-LABEL: @ctpop_x_and_negx_vec_nz(
-; CHECK-NEXT: ret <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 1, i32 1>
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]]
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X1]], [[SUB]]
+; CHECK-NEXT: ret <2 x i32> [[AND]]
;
%x1 = or <2 x i32> %x, <i32 1 ,i32 1>
%sub = sub <2 x i32> <i32 0 ,i32 0>, %x1
diff --git a/llvm/test/Transforms/InstSimplify/ctpop-pow2.ll b/llvm/test/Transforms/InstSimplify/ctpop-pow2.ll
index 2711d36d6a461..eae368f03ca7e 100644
--- a/llvm/test/Transforms/InstSimplify/ctpop-pow2.ll
+++ b/llvm/test/Transforms/InstSimplify/ctpop-pow2.ll
@@ -44,8 +44,7 @@ define i8 @ctpop_x_nz_and_negx(i8 %x) {
; CHECK-NEXT: [[X1:%.*]] = or i8 [[X:%.*]], 1
; CHECK-NEXT: [[V0:%.*]] = sub i8 0, [[X1]]
; CHECK-NEXT: [[V1:%.*]] = and i8 [[X1]], [[V0]]
-; CHECK-NEXT: [[CNT:%.*]] = call i8 @llvm.ctpop.i8(i8 [[V1]])
-; CHECK-NEXT: ret i8 [[CNT]]
+; CHECK-NEXT: ret i8 [[V1]]
;
%x1 = or i8 %x, 1
%v0 = sub i8 0, %x1
More information about the llvm-commits
mailing list