[llvm] [InstCombine] Check isGuaranteedNotToBeUndef in haveNoCommonBitsSetSpecialCases (PR #74390)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 4 16:58:30 PST 2023
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/74390
From 523f373dc140d189948aa944e30065ee2ef1c3c2 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 4 Dec 2023 15:27:08 -0800
Subject: [PATCH 1/2] [InstCombine] Check isGuaranteedNotToBeUndef in
haveNoCommonBitsSetSpecialCases.
Without this check, it is not safe for InstCombine to add the disjoint
flag when converting an add to an or.
I've added the noundef attribute to the affected test arguments to
preserve the existing test behavior.
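To illustrate why the check is needed, here is a minimal sketch of the
inverted-mask pattern (a hypothetical example, not one of the tests in
this patch). If %m may be undef, each of its uses can independently
observe a different value, so the two masked operands are not actually
guaranteed to have no common bits:

  define i8 @unsafe_if_m_undef(i8 %x, i8 %y, i8 %m) {
    %notm = xor i8 %m, -1    ; ~m (one use of %m)
    %a = and i8 %x, %notm    ; x & ~m
    %b = and i8 %y, %m       ; y & m (a second, independent use of %m)
    %r = add i8 %a, %b
    ret i8 %r
  }

If the caller passes undef for %m, the two uses of %m may resolve to
different bit patterns, %a and %b can then share set bits, and rewriting
the add as an or with the disjoint flag would be a miscompile.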
A few questions:
Should we only do this when the caller is going to set the disjoint flag?
Alternatively, should we freeze the inputs instead?
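As a sketch of the freeze alternative (hypothetical, not part of this
patch): freezing %m pins it to a single arbitrary-but-fixed value, after
which the inverted-mask reasoning holds across both uses:

  define i8 @freeze_then_mask(i8 %x, i8 %y, i8 %m) {
    %m.fr = freeze i8 %m     ; all uses now see the same fixed value
    %notm = xor i8 %m.fr, -1
    %a = and i8 %x, %notm    ; x & ~m
    %b = and i8 %y, %m.fr    ; y & m, same frozen value as above
    %r = add i8 %a, %b       ; operands share no bits; safe to convert
    ret i8 %r
  }

The trade-off is an extra freeze instruction versus simply bailing out
when the operand is not guaranteed non-undef, as this patch does.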
---
llvm/lib/Analysis/ValueTracking.cpp | 24 ++++++----
llvm/test/Transforms/InstCombine/add.ll | 46 +++++++++----------
.../test/Transforms/InstCombine/and-or-not.ll | 2 +-
.../Transforms/InstCombine/logical-select.ll | 6 +--
.../InstCombine/masked-merge-add.ll | 22 ++++-----
.../Transforms/InstCombine/masked-merge-or.ll | 22 ++++-----
.../InstCombine/masked-merge-xor.ll | 22 ++++-----
llvm/test/Transforms/InstCombine/pr53357.ll | 14 +++---
llvm/unittests/Analysis/ValueTrackingTest.cpp | 6 +--
9 files changed, 86 insertions(+), 78 deletions(-)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 1f09d912f7339..ad1806ee231cf 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -187,36 +187,44 @@ KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
}
static bool haveNoCommonBitsSetSpecialCases(const Value *LHS,
- const Value *RHS) {
+ const Value *RHS,
+ const SimplifyQuery &SQ) {
// Look for an inverted mask: (X & ~M) op (Y & M).
{
Value *M;
if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
- match(RHS, m_c_And(m_Specific(M), m_Value())))
+ match(RHS, m_c_And(m_Specific(M), m_Value())) &&
+ isGuaranteedNotToBeUndef(M, SQ.AC, SQ.CxtI, SQ.DT))
return true;
}
// X op (Y & ~X)
- if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())))
+ if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) &&
+ isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT))
return true;
// X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
// for constant Y.
Value *Y;
- if (match(RHS, m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))))
+ if (match(RHS, m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) &&
+ isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT) &&
+ isGuaranteedNotToBeUndef(Y, SQ.AC, SQ.CxtI, SQ.DT))
return true;
// Peek through extends to find a 'not' of the other side:
// (ext Y) op ext(~Y)
if (match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
- match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y)))))
+ match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y)))) &&
+ isGuaranteedNotToBeUndef(Y, SQ.AC, SQ.CxtI, SQ.DT))
return true;
// Look for: (A & B) op ~(A | B)
{
Value *A, *B;
if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
- match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
+ match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))) &&
+ isGuaranteedNotToBeUndef(A, SQ.AC, SQ.CxtI, SQ.DT) &&
+ isGuaranteedNotToBeUndef(B, SQ.AC, SQ.CxtI, SQ.DT))
return true;
}
@@ -234,8 +242,8 @@ bool llvm::haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
assert(LHS->getType()->isIntOrIntVectorTy() &&
"LHS and RHS should be integers");
- if (haveNoCommonBitsSetSpecialCases(LHS, RHS) ||
- haveNoCommonBitsSetSpecialCases(RHS, LHS))
+ if (haveNoCommonBitsSetSpecialCases(LHS, RHS, SQ) ||
+ haveNoCommonBitsSetSpecialCases(RHS, LHS, SQ))
return true;
return KnownBits::haveNoCommonBitsSet(LHSCache.getKnownBits(SQ),
diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index db9eafe998ebb..b9280016b1e62 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -1511,7 +1511,7 @@ define i8 @add_like_or_t2_extrause(i8 %x) {
ret i8 %r
}
-define i8 @add_and_xor(i8 %x, i8 %y) {
+define i8 @add_and_xor(i8 noundef %x, i8 %y) {
; CHECK-LABEL: @add_and_xor(
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i8 [[ADD]]
@@ -1548,7 +1548,7 @@ define i8 @add_and_xor_wrong_op(i8 %x, i8 %y, i8 %z) {
ret i8 %add
}
-define i8 @add_and_xor_commuted1(i8 %x, i8 %_y) {
+define i8 @add_and_xor_commuted1(i8 noundef %x, i8 %_y) {
; CHECK-LABEL: @add_and_xor_commuted1(
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y]], [[X:%.*]]
@@ -1561,7 +1561,7 @@ define i8 @add_and_xor_commuted1(i8 %x, i8 %_y) {
ret i8 %add
}
-define i8 @add_and_xor_commuted2(i8 %_x, i8 %y) {
+define i8 @add_and_xor_commuted2(i8 noundef %_x, i8 %y) {
; CHECK-LABEL: @add_and_xor_commuted2(
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[X]], [[Y:%.*]]
@@ -1574,7 +1574,7 @@ define i8 @add_and_xor_commuted2(i8 %_x, i8 %y) {
ret i8 %add
}
-define i8 @add_and_xor_commuted3(i8 %_x, i8 %_y) {
+define i8 @add_and_xor_commuted3(i8 noundef %_x, i8 %_y) {
; CHECK-LABEL: @add_and_xor_commuted3(
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
@@ -1589,7 +1589,7 @@ define i8 @add_and_xor_commuted3(i8 %_x, i8 %_y) {
ret i8 %add
}
-define i8 @add_and_xor_extra_use(i8 %x, i8 %y) {
+define i8 @add_and_xor_extra_use(i8 noundef %x, i8 %y) {
; CHECK-LABEL: @add_and_xor_extra_use(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use(i8 [[XOR]])
@@ -1606,7 +1606,7 @@ define i8 @add_and_xor_extra_use(i8 %x, i8 %y) {
ret i8 %add
}
-define i8 @add_xor_and_const(i8 %x) {
+define i8 @add_xor_and_const(i8 noundef %x) {
; CHECK-LABEL: @add_xor_and_const(
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[X:%.*]], 42
; CHECK-NEXT: ret i8 [[ADD]]
@@ -1630,7 +1630,7 @@ define i8 @add_xor_and_const_wrong_const(i8 %x) {
ret i8 %add
}
-define i8 @add_xor_and_var(i8 %x, i8 %y) {
+define i8 @add_xor_and_var(i8 noundef %x, i8 noundef %y) {
; CHECK-LABEL: @add_xor_and_var(
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: call void @use(i8 [[AND]])
@@ -1674,7 +1674,7 @@ define i8 @add_xor_and_var_wrong_op2(i8 %x, i8 %y, i8 %z) {
ret i8 %add
}
-define i8 @add_xor_and_var_commuted1(i8 %x, i8 %y) {
+define i8 @add_xor_and_var_commuted1(i8 noundef %x, i8 noundef %y) {
; CHECK-LABEL: @add_xor_and_var_commuted1(
; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: call void @use(i8 [[AND]])
@@ -1688,7 +1688,7 @@ define i8 @add_xor_and_var_commuted1(i8 %x, i8 %y) {
ret i8 %add
}
-define i8 @add_xor_and_var_commuted2(i8 %_x, i8 %_y) {
+define i8 @add_xor_and_var_commuted2(i8 noundef %_x, i8 noundef %_y) {
; CHECK-LABEL: @add_xor_and_var_commuted2(
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
@@ -1706,7 +1706,7 @@ define i8 @add_xor_and_var_commuted2(i8 %_x, i8 %_y) {
ret i8 %add
}
-define i8 @add_xor_and_var_commuted3(i8 %x, i8 %_y) {
+define i8 @add_xor_and_var_commuted3(i8 noundef %x, i8 noundef %_y) {
; CHECK-LABEL: @add_xor_and_var_commuted3(
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y]], [[X:%.*]]
@@ -1722,7 +1722,7 @@ define i8 @add_xor_and_var_commuted3(i8 %x, i8 %_y) {
ret i8 %add
}
-define i8 @add_xor_and_var_commuted4(i8 %_x, i8 %y) {
+define i8 @add_xor_and_var_commuted4(i8 noundef %_x, i8 noundef %y) {
; CHECK-LABEL: @add_xor_and_var_commuted4(
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], [[Y:%.*]]
@@ -1738,7 +1738,7 @@ define i8 @add_xor_and_var_commuted4(i8 %_x, i8 %y) {
ret i8 %add
}
-define i8 @add_xor_and_var_commuted5(i8 %_x, i8 %_y) {
+define i8 @add_xor_and_var_commuted5(i8 noundef %_x, i8 noundef %_y) {
; CHECK-LABEL: @add_xor_and_var_commuted5(
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
@@ -1756,7 +1756,7 @@ define i8 @add_xor_and_var_commuted5(i8 %_x, i8 %_y) {
ret i8 %add
}
-define i8 @add_xor_and_var_commuted6(i8 %_x, i8 %_y) {
+define i8 @add_xor_and_var_commuted6(i8 noundef %_x, i8 noundef %_y) {
; CHECK-LABEL: @add_xor_and_var_commuted6(
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
@@ -1774,7 +1774,7 @@ define i8 @add_xor_and_var_commuted6(i8 %_x, i8 %_y) {
ret i8 %add
}
-define i8 @add_xor_and_var_commuted7(i8 %_x, i8 %_y) {
+define i8 @add_xor_and_var_commuted7(i8 noundef %_x, i8 noundef %_y) {
; CHECK-LABEL: @add_xor_and_var_commuted7(
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
@@ -1792,7 +1792,7 @@ define i8 @add_xor_and_var_commuted7(i8 %_x, i8 %_y) {
ret i8 %add
}
-define i8 @add_xor_and_var_extra_use(i8 %x, i8 %y) {
+define i8 @add_xor_and_var_extra_use(i8 noundef %x, i8 noundef %y) {
; CHECK-LABEL: @add_xor_and_var_extra_use(
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: call void @use(i8 [[AND]])
@@ -2573,7 +2573,7 @@ define <vscale x 1 x i32> @add_to_or_scalable(<vscale x 1 x i32> %in) {
ret <vscale x 1 x i32> %add
}
-define i5 @zext_zext_not(i3 %x) {
+define i5 @zext_zext_not(i3 noundef %x) {
; CHECK-LABEL: @zext_zext_not(
; CHECK-NEXT: ret i5 7
;
@@ -2584,7 +2584,7 @@ define i5 @zext_zext_not(i3 %x) {
ret i5 %r
}
-define <2 x i5> @zext_zext_not_commute(<2 x i3> %x) {
+define <2 x i5> @zext_zext_not_commute(<2 x i3> noundef %x) {
; CHECK-LABEL: @zext_zext_not_commute(
; CHECK-NEXT: ret <2 x i5> <i5 7, i5 7>
;
@@ -2595,7 +2595,7 @@ define <2 x i5> @zext_zext_not_commute(<2 x i3> %x) {
ret <2 x i5> %r
}
-define i9 @sext_sext_not(i3 %x) {
+define i9 @sext_sext_not(i3 noundef %x) {
; CHECK-LABEL: @sext_sext_not(
; CHECK-NEXT: ret i9 -1
;
@@ -2606,7 +2606,7 @@ define i9 @sext_sext_not(i3 %x) {
ret i9 %r
}
-define i8 @sext_sext_not_commute(i3 %x) {
+define i8 @sext_sext_not_commute(i3 noundef %x) {
; CHECK-LABEL: @sext_sext_not_commute(
; CHECK-NEXT: [[SX:%.*]] = sext i3 [[X:%.*]] to i8
; CHECK-NEXT: call void @use(i8 [[SX]])
@@ -2621,7 +2621,7 @@ define i8 @sext_sext_not_commute(i3 %x) {
ret i8 %r
}
-define i5 @zext_sext_not(i4 %x) {
+define i5 @zext_sext_not(i4 noundef %x) {
; CHECK-LABEL: @zext_sext_not(
; CHECK-NEXT: [[ZX:%.*]] = zext i4 [[X:%.*]] to i5
; CHECK-NEXT: [[NOTX:%.*]] = xor i4 [[X]], -1
@@ -2636,7 +2636,7 @@ define i5 @zext_sext_not(i4 %x) {
ret i5 %r
}
-define i8 @zext_sext_not_commute(i4 %x) {
+define i8 @zext_sext_not_commute(i4 noundef %x) {
; CHECK-LABEL: @zext_sext_not_commute(
; CHECK-NEXT: [[ZX:%.*]] = zext i4 [[X:%.*]] to i8
; CHECK-NEXT: call void @use(i8 [[ZX]])
@@ -2655,7 +2655,7 @@ define i8 @zext_sext_not_commute(i4 %x) {
ret i8 %r
}
-define i9 @sext_zext_not(i4 %x) {
+define i9 @sext_zext_not(i4 noundef %x) {
; CHECK-LABEL: @sext_zext_not(
; CHECK-NEXT: [[SX:%.*]] = sext i4 [[X:%.*]] to i9
; CHECK-NEXT: [[NOTX:%.*]] = xor i4 [[X]], -1
@@ -2670,7 +2670,7 @@ define i9 @sext_zext_not(i4 %x) {
ret i9 %r
}
-define i9 @sext_zext_not_commute(i4 %x) {
+define i9 @sext_zext_not_commute(i4 noundef %x) {
; CHECK-LABEL: @sext_zext_not_commute(
; CHECK-NEXT: [[SX:%.*]] = sext i4 [[X:%.*]] to i9
; CHECK-NEXT: [[NOTX:%.*]] = xor i4 [[X]], -1
diff --git a/llvm/test/Transforms/InstCombine/and-or-not.ll b/llvm/test/Transforms/InstCombine/and-or-not.ll
index c896c8f100380..ca093eba1b568 100644
--- a/llvm/test/Transforms/InstCombine/and-or-not.ll
+++ b/llvm/test/Transforms/InstCombine/and-or-not.ll
@@ -548,7 +548,7 @@ define i32 @and_to_nxor_multiuse(float %fa, float %fb) {
; (a & b) | ~(a | b) --> ~(a ^ b)
; TODO: this increases instruction count if the pieces have additional users
-define i32 @or_to_nxor_multiuse(i32 %a, i32 %b) {
+define i32 @or_to_nxor_multiuse(i32 noundef %a, i32 noundef %b) {
; CHECK-LABEL: @or_to_nxor_multiuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[A]], [[B]]
diff --git a/llvm/test/Transforms/InstCombine/logical-select.ll b/llvm/test/Transforms/InstCombine/logical-select.ll
index 31848bc911892..c3eec5eebf2eb 100644
--- a/llvm/test/Transforms/InstCombine/logical-select.ll
+++ b/llvm/test/Transforms/InstCombine/logical-select.ll
@@ -762,7 +762,7 @@ define <vscale x 2 x i64> @bitcast_vec_cond_scalable(<vscale x 16 x i1> %cond, <
; Negative test - bitcast of condition from wide source element type cannot be converted to select.
-define <8 x i3> @bitcast_vec_cond_commute1(<3 x i1> %cond, <8 x i3> %pc, <8 x i3> %d) {
+define <8 x i3> @bitcast_vec_cond_commute1(<3 x i1> noundef %cond, <8 x i3> %pc, <8 x i3> %d) {
; CHECK-LABEL: @bitcast_vec_cond_commute1(
; CHECK-NEXT: [[C:%.*]] = mul <8 x i3> [[PC:%.*]], [[PC]]
; CHECK-NEXT: [[S:%.*]] = sext <3 x i1> [[COND:%.*]] to <3 x i8>
@@ -830,7 +830,7 @@ define <2 x i16> @bitcast_vec_cond_commute3(<4 x i8> %cond, <2 x i16> %pc, <2 x
; Don't crash on invalid type for compute signbits.
-define <2 x i64> @bitcast_fp_vec_cond(<2 x double> %s, <2 x i64> %c, <2 x i64> %d) {
+define <2 x i64> @bitcast_fp_vec_cond(<2 x double> noundef %s, <2 x i64> %c, <2 x i64> %d) {
; CHECK-LABEL: @bitcast_fp_vec_cond(
; CHECK-NEXT: [[T9:%.*]] = bitcast <2 x double> [[S:%.*]] to <2 x i64>
; CHECK-NEXT: [[NOTT9:%.*]] = xor <2 x i64> [[T9]], <i64 -1, i64 -1>
@@ -849,7 +849,7 @@ define <2 x i64> @bitcast_fp_vec_cond(<2 x double> %s, <2 x i64> %c, <2 x i64> %
; Wider source type would be ok except poison could leak across elements.
-define <2 x i64> @bitcast_int_vec_cond(i1 %b, <2 x i64> %c, <2 x i64> %d) {
+define <2 x i64> @bitcast_int_vec_cond(i1 noundef %b, <2 x i64> %c, <2 x i64> %d) {
; CHECK-LABEL: @bitcast_int_vec_cond(
; CHECK-NEXT: [[S:%.*]] = sext i1 [[B:%.*]] to i128
; CHECK-NEXT: [[T9:%.*]] = bitcast i128 [[S]] to <2 x i64>
diff --git a/llvm/test/Transforms/InstCombine/masked-merge-add.ll b/llvm/test/Transforms/InstCombine/masked-merge-add.ll
index c4265eb896b20..f655153108a43 100644
--- a/llvm/test/Transforms/InstCombine/masked-merge-add.ll
+++ b/llvm/test/Transforms/InstCombine/masked-merge-add.ll
@@ -16,7 +16,7 @@
; Most basic positive tests
; ============================================================================ ;
-define i32 @p(i32 %x, i32 %y, i32 %m) {
+define i32 @p(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -31,7 +31,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
+define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) {
; CHECK-LABEL: @p_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
@@ -46,7 +46,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
ret <2 x i32> %ret
}
-define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) {
+define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) {
; CHECK-LABEL: @p_vec_undef(
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 undef, i32 -1>
@@ -180,7 +180,7 @@ define <3 x i32> @p_constmask2_vec_undef(<3 x i32> %x, <3 x i32> %y) {
; Used to make sure that the IR complexity sorting does not interfere.
declare i32 @gen32()
-define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative0(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -195,7 +195,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative1(i32 %x, i32 %m) {
+define i32 @p_commutative1(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative1(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -212,7 +212,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -227,7 +227,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative3(i32 %x, i32 %m) {
+define i32 @p_commutative3(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative3(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -244,7 +244,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative4(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -259,7 +259,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative5(i32 %x, i32 %m) {
+define i32 @p_commutative5(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative5(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -276,7 +276,7 @@ define i32 @p_commutative5(i32 %x, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative6(i32 %x, i32 %m) {
+define i32 @p_commutative6(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative6(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -314,7 +314,7 @@ define i32 @p_constmask_commutative(i32 %x, i32 %y) {
declare void @use32(i32)
-define i32 @n0_oneuse(i32 %x, i32 %y, i32 %m) {
+define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @n0_oneuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
diff --git a/llvm/test/Transforms/InstCombine/masked-merge-or.ll b/llvm/test/Transforms/InstCombine/masked-merge-or.ll
index 7d67b2d495412..b49ec07706e28 100644
--- a/llvm/test/Transforms/InstCombine/masked-merge-or.ll
+++ b/llvm/test/Transforms/InstCombine/masked-merge-or.ll
@@ -16,7 +16,7 @@
; Most basic positive tests
; ============================================================================ ;
-define i32 @p(i32 %x, i32 %y, i32 %m) {
+define i32 @p(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -31,7 +31,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
+define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) {
; CHECK-LABEL: @p_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
@@ -46,7 +46,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
ret <2 x i32> %ret
}
-define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) {
+define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) {
; CHECK-LABEL: @p_vec_undef(
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 undef, i32 -1>
@@ -180,7 +180,7 @@ define <3 x i32> @p_constmask2_vec_undef(<3 x i32> %x, <3 x i32> %y) {
; Used to make sure that the IR complexity sorting does not interfere.
declare i32 @gen32()
-define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative0(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -195,7 +195,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative1(i32 %x, i32 %m) {
+define i32 @p_commutative1(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative1(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -212,7 +212,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -227,7 +227,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative3(i32 %x, i32 %m) {
+define i32 @p_commutative3(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative3(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -244,7 +244,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative4(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -259,7 +259,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative5(i32 %x, i32 %m) {
+define i32 @p_commutative5(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative5(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -276,7 +276,7 @@ define i32 @p_commutative5(i32 %x, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative6(i32 %x, i32 %m) {
+define i32 @p_commutative6(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative6(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -314,7 +314,7 @@ define i32 @p_constmask_commutative(i32 %x, i32 %y) {
declare void @use32(i32)
-define i32 @n0_oneuse(i32 %x, i32 %y, i32 %m) {
+define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @n0_oneuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
diff --git a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll
index 4f0845c8c8f46..a6d201be68cee 100644
--- a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll
+++ b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll
@@ -16,7 +16,7 @@
; Most basic positive tests
; ============================================================================ ;
-define i32 @p(i32 %x, i32 %y, i32 %m) {
+define i32 @p(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -31,7 +31,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
+define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) {
; CHECK-LABEL: @p_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
@@ -46,7 +46,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
ret <2 x i32> %ret
}
-define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) {
+define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) {
; CHECK-LABEL: @p_vec_undef(
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 undef, i32 -1>
@@ -180,7 +180,7 @@ define <3 x i32> @p_constmask2_vec_undef(<3 x i32> %x, <3 x i32> %y) {
; Used to make sure that the IR complexity sorting does not interfere.
declare i32 @gen32()
-define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative0(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -195,7 +195,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative1(i32 %x, i32 %m) {
+define i32 @p_commutative1(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative1(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -212,7 +212,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -227,7 +227,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative3(i32 %x, i32 %m) {
+define i32 @p_commutative3(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative3(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -244,7 +244,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative4(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
@@ -259,7 +259,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative5(i32 %x, i32 %m) {
+define i32 @p_commutative5(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative5(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -276,7 +276,7 @@ define i32 @p_commutative5(i32 %x, i32 %m) {
ret i32 %ret
}
-define i32 @p_commutative6(i32 %x, i32 %m) {
+define i32 @p_commutative6(i32 %x, i32 noundef %m) {
; CHECK-LABEL: @p_commutative6(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -314,7 +314,7 @@ define i32 @p_constmask_commutative(i32 %x, i32 %y) {
declare void @use32(i32)
-define i32 @n0_oneuse(i32 %x, i32 %y, i32 %m) {
+define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @n0_oneuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
diff --git a/llvm/test/Transforms/InstCombine/pr53357.ll b/llvm/test/Transforms/InstCombine/pr53357.ll
index bdb63068fd4ae..0a6d2993ce46a 100644
--- a/llvm/test/Transforms/InstCombine/pr53357.ll
+++ b/llvm/test/Transforms/InstCombine/pr53357.ll
@@ -3,7 +3,7 @@
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; (x & y) + ~(x | y)
-define i32 @src(i32 %0, i32 %1) {
+define i32 @src(i32 noundef %0, i32 noundef %1) {
; CHECK-LABEL: @src(
; CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[TMP3]], -1
@@ -17,7 +17,7 @@ define i32 @src(i32 %0, i32 %1) {
}
; vector version of src
-define <2 x i32> @src_vec(<2 x i32> %0, <2 x i32> %1) {
+define <2 x i32> @src_vec(<2 x i32> noundef %0, <2 x i32> noundef %1) {
; CHECK-LABEL: @src_vec(
; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i32> [[TMP1:%.*]], [[TMP0:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i32> [[TMP3]], <i32 -1, i32 -1>
@@ -31,7 +31,7 @@ define <2 x i32> @src_vec(<2 x i32> %0, <2 x i32> %1) {
}
; vector version of src with undef values
-define <2 x i32> @src_vec_undef(<2 x i32> %0, <2 x i32> %1) {
+define <2 x i32> @src_vec_undef(<2 x i32> noundef %0, <2 x i32> noundef %1) {
; CHECK-LABEL: @src_vec_undef(
; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i32> [[TMP1:%.*]], [[TMP0:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i32> [[TMP3]], <i32 -1, i32 -1>
@@ -45,7 +45,7 @@ define <2 x i32> @src_vec_undef(<2 x i32> %0, <2 x i32> %1) {
}
; (x & y) + ~(y | x)
-define i32 @src2(i32 %0, i32 %1) {
+define i32 @src2(i32 noundef %0, i32 noundef %1) {
; CHECK-LABEL: @src2(
; CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[TMP3]], -1
@@ -59,7 +59,7 @@ define i32 @src2(i32 %0, i32 %1) {
}
; (x & y) + (~x & ~y)
-define i32 @src3(i32 %0, i32 %1) {
+define i32 @src3(i32 noundef %0, i32 noundef %1) {
; CHECK-LABEL: @src3(
; CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[TMP3]], -1
@@ -74,7 +74,7 @@ define i32 @src3(i32 %0, i32 %1) {
}
; ~(x | y) + (y & x)
-define i32 @src4(i32 %0, i32 %1) {
+define i32 @src4(i32 noundef %0, i32 noundef %1) {
; CHECK-LABEL: @src4(
; CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[TMP3]], -1
@@ -88,7 +88,7 @@ define i32 @src4(i32 %0, i32 %1) {
}
; ~(x | y) + (x & y)
-define i32 @src5(i32 %0, i32 %1) {
+define i32 @src5(i32 noundef %0, i32 noundef %1) {
; CHECK-LABEL: @src5(
; CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[TMP3]], -1
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 1190550340efb..0d3a594da0c06 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2604,7 +2604,7 @@ TEST_F(ValueTrackingTest, HaveNoCommonBitsSet) {
{
// Check for an inverted mask: (X & ~M) op (Y & M).
auto M = parseModule(R"(
- define i32 @test(i32 %X, i32 %Y, i32 %M) {
+ define i32 @test(i32 %X, i32 %Y, i32 noundef %M) {
%1 = xor i32 %M, -1
%LHS = and i32 %1, %X
%RHS = and i32 %Y, %M
@@ -2623,7 +2623,7 @@ TEST_F(ValueTrackingTest, HaveNoCommonBitsSet) {
{
// Check for (A & B) and ~(A | B)
auto M = parseModule(R"(
- define void @test(i32 %A, i32 %B) {
+ define void @test(i32 noundef %A, i32 noundef %B) {
%LHS = and i32 %A, %B
%or = or i32 %A, %B
%RHS = xor i32 %or, -1
@@ -2651,7 +2651,7 @@ TEST_F(ValueTrackingTest, HaveNoCommonBitsSet) {
{
// Check for (A & B) and ~(A | B) in vector version
auto M = parseModule(R"(
- define void @test(<2 x i32> %A, <2 x i32> %B) {
+ define void @test(<2 x i32> noundef %A, <2 x i32> noundef %B) {
%LHS = and <2 x i32> %A, %B
%or = or <2 x i32> %A, %B
%RHS = xor <2 x i32> %or, <i32 -1, i32 -1>
From ed7af7c4e3f9209c9b574b279152c99371b662ba Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 4 Dec 2023 16:58:17 -0800
Subject: [PATCH 2/2] fixup! clang-format
---
llvm/lib/Analysis/ValueTracking.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index ad1806ee231cf..0a0882993191b 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -186,8 +186,7 @@ KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
}
-static bool haveNoCommonBitsSetSpecialCases(const Value *LHS,
- const Value *RHS,
+static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS,
const SimplifyQuery &SQ) {
// Look for an inverted mask: (X & ~M) op (Y & M).
{
@@ -206,7 +205,8 @@ static bool haveNoCommonBitsSetSpecialCases(const Value *LHS,
// X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
// for constant Y.
Value *Y;
- if (match(RHS, m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) &&
+ if (match(RHS,
+ m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) &&
isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT) &&
isGuaranteedNotToBeUndef(Y, SQ.AC, SQ.CxtI, SQ.DT))
return true;